=== RUN kuttl harness.go:464: starting setup harness.go:255: running tests using configured kubeconfig. harness.go:278: Successful connection to cluster at: https://34.66.220.108 harness.go:363: running tests harness.go:75: going to run test suite with timeout of 180 seconds for each step harness.go:375: testsuite: e2e-tests/tests has 32 tests === RUN kuttl/harness === RUN kuttl/harness/operator-self-healing === PAUSE kuttl/harness/operator-self-healing === CONT kuttl/harness/operator-self-healing logger.go:42: 15:39:53 | operator-self-healing | Creating namespace: kuttl-test-divine-python logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | starting test step 0-deploy-operator logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_temp_dir # do this only in the first TestStep deploy_operator deploy_non_tls_cluster_secrets deploy_tls_cluster_secrets deploy_client] logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | + source ../../functions logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ realpath ../../.. logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++++ pwd logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++ test_name=operator-self-healing logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ GIT_BRANCH=PR-713 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export VERSION=PR-713-b294b530 
logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ VERSION=PR-713-b294b530 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++++ which gdate logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | ++++ which date logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ date=/usr/bin/date logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ command -v oc logger.go:42: 15:39:53 | 
operator-self-healing/0-deploy-operator | +++ kubectl get nodes logger.go:42: 15:39:53 | operator-self-healing/0-deploy-operator | +++ grep '^minikube' logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + init_temp_dir logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + rm -rf /tmp/kuttl/ps/operator-self-healing logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/operator-self-healing logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + deploy_operator logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + destroy_operator logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + true logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 15:39:54 | operator-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + true logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + create_namespace ps-operator logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + local namespace=ps-operator logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + [[ -n '' ]] logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator logger.go:42: 15:39:55 | operator-self-healing/0-deploy-operator | + kubectl create namespace ps-operator logger.go:42: 15:39:56 | operator-self-healing/0-deploy-operator | namespace/ps-operator created logger.go:42: 15:39:56 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/crd.yaml logger.go:42: 15:39:57 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied logger.go:42: 15:39:57 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied logger.go:42: 15:39:58 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied logger.go:42: 15:39:58 | operator-self-healing/0-deploy-operator | + '[' -n ps-operator ']' logger.go:42: 
15:39:58 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/cw-rbac.yaml logger.go:42: 15:39:59 | operator-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator created logger.go:42: 15:39:59 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 15:39:59 | operator-self-healing/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 15:39:59 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 15:40:00 | operator-self-healing/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 15:40:00 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' logger.go:42: 15:40:00 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f - logger.go:42: 15:40:00 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 15:40:00 | operator-self-healing/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:40:00 | operator-self-healing/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-713-b294b530"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/cw-operator.yaml logger.go:42: 15:40:01 | operator-self-healing/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 15:40:01 | operator-self-healing/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 15:40:01 | operator-self-healing/0-deploy-operator | + deploy_non_tls_cluster_secrets logger.go:42: 15:40:01 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-divine-python apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf/secrets.yaml logger.go:42: 15:40:02 | operator-self-healing/0-deploy-operator | secret/test-secrets created logger.go:42: 15:40:02 | operator-self-healing/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 15:40:02 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-divine-python apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf/ssl-secret.yaml logger.go:42: 15:40:03 | operator-self-healing/0-deploy-operator | secret/test-ssl created logger.go:42: 15:40:03 | operator-self-healing/0-deploy-operator | + deploy_client logger.go:42: 15:40:03 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-divine-python apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf/client.yaml logger.go:42: 15:40:04 | operator-self-healing/0-deploy-operator | pod/mysql-client created logger.go:42: 15:40:04 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:40:04 | 
operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:40:04 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 15:40:06 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:40:06 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:40:06 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 15:40:08 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:40:08 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:40:08 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 15:40:10 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:40:10 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:40:10 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 15:40:11 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:40:11 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:40:12 | operator-self-healing/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 15:40:12 | operator-self-healing/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 15:40:12 | operator-self-healing/0-deploy-operator | percona-server-mysql-operator  ps-operator  1
logger.go:42: 15:40:12 | operator-self-healing/0-deploy-operator | ASSERT PASS
logger.go:42: 15:40:12 | operator-self-healing/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | starting test step 1-deploy-chaos-mesh
logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_chaos_mesh]
logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | + source ../../functions
logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ realpath ../../..
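Condensed for reference, the deploy-operator step that just completed above amounts to the following; DEPLOY_DIR and IMAGE are the vars.sh values shown in the trace, and the sleep interval and attempt count in the wait loop are illustrative rather than values taken from the harness:

    # Recreate the operator namespace, apply CRDs, RBAC and the operator deployment
    # with the PR image patched in via yq, as traced in step 0 above.
    kubectl create namespace ps-operator
    kubectl -n ps-operator apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
    kubectl -n ps-operator apply -f "${DEPLOY_DIR}/cw-rbac.yaml"
    yq eval "select(documentIndex==1).spec.template.spec.containers[0].image=\"${IMAGE}\"" "${DEPLOY_DIR}/cw-operator.yaml" \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' - \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' - \
        | kubectl -n ps-operator apply -f -
    # Wait until the deployment reports a ready replica, mirroring the kubectl-assert
    # retries above (the kubectl-assert plugin is assumed to be installed).
    for _ in $(seq 1 30); do
        kubectl assert exist-enhanced deployment percona-server-mysql-operator \
            -n ps-operator --field-selector status.readyReplicas=1 && break
        sleep 2
    done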
logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++++ pwd logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ test_name=operator-self-healing logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ GIT_BRANCH=PR-713 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ VERSION=PR-713-b294b530 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:40:12 | 
operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++++ which gdate logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++++ which date logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ date=/usr/bin/date logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ command -v oc logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ kubectl get nodes logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | +++ grep '^minikube' logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | + deploy_chaos_mesh logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | + destroy_chaos_mesh logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ tail -n1 logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ sed s/NAMESPACE// logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '-F ' '{print $2}' logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:40:12 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. 
Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | + local chaos_mesh_ns= logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']' logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | + : logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:40:13 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | + : logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | ++ grep validate-auth logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | + : logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl api-resources logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:40:14 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get crd logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh.org logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete crd logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | + : logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrolebinding logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:40:15 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:16 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding logger.go:42: 15:40:16 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:40:16 | operator-self-healing/1-deploy-chaos-mesh | + : 
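The destroy_chaos_mesh pass traced above (together with the clusterrole deletion that follows) is a best-effort sweep of leftover Chaos Mesh cluster-scoped objects before the chart is reinstalled. A condensed sketch of that sweep plus the reinstall that appears in the helm output below, assuming the same kubeconfig and test namespace:

    # Delete any Chaos Mesh webhooks, CRDs and RBAC left over from a previous run;
    # "|| :" swallows the "no name was specified" errors seen above when nothing matches.
    for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration clusterrolebinding clusterrole; do
        timeout 30 kubectl delete "$kind" $(kubectl get "$kind" | grep chaos-mesh | awk '{print $1}') || :
    done
    timeout 30 kubectl delete crd $(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
    # Reinstall the chart into the test namespace with the same flags as in the trace.
    helm repo add chaos-mesh https://charts.chaos-mesh.org
    helm install chaos-mesh chaos-mesh/chaos-mesh --namespace="${NAMESPACE}" \
        --set chaosDaemon.runtime=containerd \
        --set chaosDaemon.socketPath=/run/containerd/containerd.sock \
        --set dashboard.create=false --version 2.5.1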
logger.go:42: 15:40:16 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrole logger.go:42: 15:40:16 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:40:16 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrole logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | + : logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | + helm repo add chaos-mesh https://charts.chaos-mesh.org logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | "chaos-mesh" has been added to your repositories logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']' logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=kuttl-test-divine-python --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:40:17 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | NAME: chaos-mesh logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | LAST DEPLOYED: Wed Jul 31 15:40:31 2024 logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | NAMESPACE: kuttl-test-divine-python logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | STATUS: deployed logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | REVISION: 1 logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | TEST SUITE: None logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | NOTES: logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | 1. Make sure chaos-mesh components are running logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | kubectl get pods --namespace kuttl-test-divine-python -l app.kubernetes.io/instance=chaos-mesh logger.go:42: 15:40:44 | operator-self-healing/1-deploy-chaos-mesh | + sleep 10 [controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. 
Detected at:
> goroutine 13 [running]:
> runtime/debug.Stack()
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc000299c00, {0x184a055, 0x14})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc000299c00}, 0x0}, {0x184a055?, 0xc000991f80?})
> /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc00042caf0, {0x1accd90, 0xc0003a06c0}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc000348008?, {0x0, 0xc00042caf0, {0x1accd90, 0xc0003a06c0}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc000348008, {0x0, 0xc00042caf0, {0x1accd90, 0xc0003a06c0}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc000336c08, 0x47?)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0005b9a00, 0xc0003aa680, {0xc00046c510, 0x18})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0005b9a00, 0xc0003aa680, {0xc00046c510, 0x18})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc000427360, 0xc0003aa680, 0xc0004fa090)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc0003aa680)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc0003aa680, 0xc00039e5b8)
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 12
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 15:40:55 | operator-self-healing/1-deploy-chaos-mesh | test step completed 1-deploy-chaos-mesh
logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=3' - \
  | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + source ../../functions
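Step 2 builds the custom resource by piping deploy/cr.yaml through a series of yq overrides, as echoed in the command above. A minimal sketch of that pipeline, assuming DEPLOY_DIR and NAMESPACE as in the trace (the real get_cr additionally patches the image, secret and init-image fields, as the per-image yq calls later in the trace show):

    # Build and apply the PerconaServerMySQL CR for an async cluster with 3 MySQL,
    # 3 HAProxy and 3 orchestrator pods (condensed sketch of step 2).
    yq eval '.metadata.name="operator-self-healing"' "${DEPLOY_DIR}/cr.yaml" \
        | yq eval '.spec.mysql.clusterType="async"' - \
        | yq eval '.spec.mysql.size=3' - \
        | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.proxy.haproxy.enabled=true' - \
        | yq eval '.spec.proxy.haproxy.size=3' - \
        | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.orchestrator.enabled=true' - \
        | yq eval '.spec.orchestrator.size=3' - \
        | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
        | kubectl -n "${NAMESPACE}" apply -f -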
logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ realpath ../../.. logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++++ pwd logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ test_name=operator-self-healing logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ GIT_BRANCH=PR-713 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ VERSION=PR-713-b294b530 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 
15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++++ which gdate logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++++ which date logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ date=/usr/bin/date logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ command -v oc logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ kubectl get nodes logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | +++ grep '^minikube' logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + get_cr logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + local name_suffix= logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.size=3 - logger.go:42: 
15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.metadata.name="%s"' operator-self-healing logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/cr.yaml logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + kubectl -n kuttl-test-divine-python apply -f - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-713-b294b530"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + '[' -n '' ']' logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | ++ 
printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 15:40:55 | operator-self-healing/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 15:40:56 | operator-self-healing/2-create-cluster | perconaservermysql.ps.percona.com/operator-self-healing created logger.go:42: 15:44:15 | operator-self-healing/2-create-cluster | test step completed 2-create-cluster logger.go:42: 15:44:15 | operator-self-healing/3-write-data | starting test step 3-write-data logger.go:42: 15:44:15 | operator-self-healing/3-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" run_mysql \ "INSERT myDB.myTable (id) VALUES (100500)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" sleep 5] logger.go:42: 15:44:15 | operator-self-healing/3-write-data | + source ../../functions logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ realpath ../../.. logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++++ pwd logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ test_name=operator-self-healing logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ GIT_BRANCH=PR-713 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ VERSION=PR-713-b294b530 logger.go:42: 15:44:15 | 
operator-self-healing/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++++ which gdate logger.go:42: 15:44:15 | operator-self-healing/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++++ which date logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ date=/usr/bin/date logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ command -v oc logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ kubectl get nodes logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ grep '^minikube' logger.go:42: 15:44:15 | operator-self-healing/3-write-data | +++ get_cluster_name logger.go:42: 15:44:15 | 
operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-divine-python get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy logger.go:42: 15:44:15 | operator-self-healing/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:15 | operator-self-healing/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 15:44:15 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:15 | operator-self-healing/3-write-data | + local pod= logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ get_client_pod logger.go:42: 15:44:15 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-divine-python get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + client_pod=mysql-client logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + wait_pod mysql-client logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + local pod=mysql-client logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + set +o xtrace logger.go:42: 15:44:16 | operator-self-healing/3-write-data | mysql-clienttrue logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + sed -e 's/mysql: //' logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:44:16 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-divine-python exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:18 | operator-self-healing/3-write-data | + : logger.go:42: 15:44:18 | operator-self-healing/3-write-data | +++ get_cluster_name logger.go:42: 15:44:18 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-divine-python get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:44:18 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing logger.go:42: 15:44:18 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing logger.go:42: 15:44:18 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy logger.go:42: 15:44:18 | operator-self-healing/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:18 | operator-self-healing/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)' logger.go:42: 15:44:18 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:18 | operator-self-healing/3-write-data | + local pod= logger.go:42: 15:44:18 | operator-self-healing/3-write-data | ++ get_client_pod logger.go:42: 15:44:18 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-divine-python get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + client_pod=mysql-client logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + wait_pod mysql-client logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + local pod=mysql-client logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + set +o xtrace logger.go:42: 15:44:19 | operator-self-healing/3-write-data | mysql-clienttrue logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-divine-python exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + sed -e 's/mysql: //' logger.go:42: 15:44:19 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:44:20 | operator-self-healing/3-write-data | + : logger.go:42: 15:44:20 | operator-self-healing/3-write-data | + sleep 5 logger.go:42: 15:44:25 | operator-self-healing/3-write-data | test step completed 3-write-data logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | starting test step 4-read-from-primary logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"] logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | + source ../../functions logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ realpath ../../.. 
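Steps 3 and 4 both go through the harness's run_mysql helper, which execs the statement inside the long-running mysql-client pod and filters the client's password warning, as traced above. A simplified sketch of both steps, where run_sql is a hypothetical local stand-in for run_mysql and the HAProxy service name and namespace variable follow the values in the trace:

    # Write a row through HAProxy (step 3) and read it back into a config map (step 4).
    run_sql() {
        kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
            "printf '%s\n' \"$1\" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }
    run_sql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
    run_sql 'INSERT myDB.myTable (id) VALUES (100500)'
    data=$(run_sql 'SELECT * FROM myDB.myTable')
    kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"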
logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++++ pwd logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++ test_name=operator-self-healing logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ GIT_BRANCH=PR-713 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ VERSION=PR-713-b294b530 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:25 | 
operator-self-healing/4-read-from-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++++ which gdate logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | ++++ which date logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ date=/usr/bin/date logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ command -v oc logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ kubectl get nodes logger.go:42: 15:44:25 | operator-self-healing/4-read-from-primary | +++ grep '^minikube' logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | ++++ get_cluster_name logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | ++++ kubectl -n kuttl-test-divine-python get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | +++ get_haproxy_svc operator-self-healing logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | +++ local cluster=operator-self-healing logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | +++ echo operator-self-healing-haproxy logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | ++ 
local 'uri=-h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | ++ local pod= logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | +++ get_client_pod logger.go:42: 15:44:26 | operator-self-healing/4-read-from-primary | +++ kubectl -n kuttl-test-divine-python get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ client_pod=mysql-client logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ wait_pod mysql-client logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ local pod=mysql-client logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ set +o xtrace logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | mysql-clienttrue logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ kubectl -n kuttl-test-divine-python exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ sed -e 's/mysql: //' logger.go:42: 15:44:27 | operator-self-healing/4-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:44:29 | operator-self-healing/4-read-from-primary | + data=100500 logger.go:42: 15:44:29 | operator-self-healing/4-read-from-primary | + kubectl create configmap -n kuttl-test-divine-python 04-read-from-primary --from-literal=data=100500 logger.go:42: 15:44:29 | operator-self-healing/4-read-from-primary | configmap/04-read-from-primary created logger.go:42: 15:44:29 | operator-self-healing/4-read-from-primary | test step completed 4-read-from-primary logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | starting test step 5-kill-pod logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_pod=$(get_operator_pod) kill_pods "${OPERATOR_NS:-$NAMESPACE}" "pod" "$init_pod" "" "operator" sleep 10 # wait a bit for pod to be killed wait_deployment percona-server-mysql-operator "${OPERATOR_NS:-$NAMESPACE}" if [ "$init_pod" == "$(get_operator_pod)" ]; then echo "operator pod was not killed! something went wrong." exit 1 fi] logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | + source ../../functions logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ realpath ../../.. 
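Step 4 finishes by writing whatever the SELECT returned (here 100500) into the 04-read-from-primary ConfigMap, which the kuttl assert for this step compares against the expected value. The two helpers used to build the connection host are fully visible in the trace; reconstructed here for illustration only:

# Sketch: the cluster name is read from the first ps (PerconaServerMySQL)
# resource in the test namespace; the HAProxy service is "<cluster>-haproxy".
get_cluster_name() {
    kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}'
}

get_haproxy_svc() {
    local cluster="$1"
    echo "${cluster}-haproxy"
}

# Usage, matching the step above (run_mysql as in the earlier sketch):
data=$(run_mysql "SELECT * FROM myDB.myTable" \
    "-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"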
logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++++ pwd logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++ test_name=operator-self-healing logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ GIT_BRANCH=PR-713 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ VERSION=PR-713-b294b530 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++++ which gdate logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | ++++ which date logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ date=/usr/bin/date logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ command -v oc logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ kubectl get nodes logger.go:42: 15:44:29 | operator-self-healing/5-kill-pod | +++ grep '^minikube' logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | ++ get_operator_pod logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + init_pod=percona-server-mysql-operator-7d69f845cb-2zcvj logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + kill_pods ps-operator pod percona-server-mysql-operator-7d69f845cb-2zcvj '' operator logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + local ns=ps-operator logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + local selector=pod logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + local pod_label=percona-server-mysql-operator-7d69f845cb-2zcvj logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + local label_value= logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + local chaos_suffix=operator logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + '[' pod == pod ']' logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + kubectl apply --namespace ps-operator -f - logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | + yq eval ' logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod 
| .metadata.name = "chaos-pod-kill-operator" | logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | del(.spec.selector.pods.test-namespace) | logger.go:42: 15:44:30 | operator-self-healing/5-kill-pod | .spec.selector.pods.ps-operator[0] = "percona-server-mysql-operator-7d69f845cb-2zcvj"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf/chaos-pod-kill.yml logger.go:42: 15:44:32 | operator-self-healing/5-kill-pod | podchaos.chaos-mesh.org/chaos-pod-kill-operator created logger.go:42: 15:44:32 | operator-self-healing/5-kill-pod | + sleep 5 logger.go:42: 15:44:37 | operator-self-healing/5-kill-pod | + sleep 10 logger.go:42: 15:44:47 | operator-self-healing/5-kill-pod | + wait_deployment percona-server-mysql-operator ps-operator logger.go:42: 15:44:47 | operator-self-healing/5-kill-pod | + local name=percona-server-mysql-operator logger.go:42: 15:44:47 | operator-self-healing/5-kill-pod | + local target_namespace=ps-operator logger.go:42: 15:44:47 | operator-self-healing/5-kill-pod | + sleep 10 logger.go:42: 15:44:57 | operator-self-healing/5-kill-pod | + set +o xtrace logger.go:42: 15:44:58 | operator-self-healing/5-kill-pod | percona-server-mysql-operator logger.go:42: 15:44:58 | operator-self-healing/5-kill-pod | ++ get_operator_pod logger.go:42: 15:44:58 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:44:58 | operator-self-healing/5-kill-pod | + '[' percona-server-mysql-operator-7d69f845cb-2zcvj == percona-server-mysql-operator-7d69f845cb-d557t ']' logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | INFO Found 1 resource(s). logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | NAME NAMESPACE COL0 logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | percona-server-mysql-operator ps-operator 1 logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | ASSERT PASS logger.go:42: 15:44:59 | operator-self-healing/5-kill-pod | test step completed 5-kill-pod logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | starting test step 6-scale-up logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=5' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | + source ../../functions logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ realpath ../../.. 
logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++++ pwd logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++ test_name=operator-self-healing logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ GIT_BRANCH=PR-713 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ VERSION=PR-713-b294b530 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++++ which gdate logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | ++++ which date logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ date=/usr/bin/date logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ command -v oc logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ kubectl get nodes logger.go:42: 15:44:59 | operator-self-healing/6-scale-up | +++ grep '^minikube' logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + get_cr logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + local name_suffix= logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval .spec.mysql.size=3 - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + kubectl -n kuttl-test-divine-python apply -f - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval .spec.orchestrator.size=3 - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval .spec.proxy.haproxy.size=5 - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + 
yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + '[' -n '' ']' logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/cr.yaml logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:45:00 | operator-self-healing/6-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-713-b294b530"' - logger.go:42: 15:45:01 | operator-self-healing/6-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured logger.go:42: 15:45:16 | operator-self-healing/6-scale-up | test step completed 6-scale-up logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | starting test step 7-network-loss logger.go:42: 15:45:16 | 
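The scale-up step never edits the live object by hand: get_cr renders deploy/cr.yaml with yq (name, secrets, and image overrides, as traced above), the step pipes the result through its own yq expressions, and kubectl apply does the rest. Condensed into a single pipeline, with the image and affinity overrides omitted for brevity (the paths and values are the ones visible in the trace):

# Sketch: render the CR from deploy/cr.yaml and scale HAProxy to 5 replicas.
yq eval '.metadata.name="operator-self-healing"' "${DEPLOY_DIR}/cr.yaml" \
  | yq eval '.spec.secretsName="test-secrets"' - \
  | yq eval '.spec.sslSecretName="test-ssl"' - \
  | yq eval '.spec.upgradeOptions.apply="disabled"' - \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=5' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | kubectl -n "${NAMESPACE}" apply -f -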
operator-self-healing/7-network-loss | running command: [sh -c set -o errexit set -o xtrace source ../../functions network_loss "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator" sleep 30 # wait for network loss to happen] logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | + source ../../functions logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ realpath ../../.. logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++++ pwd logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++ test_name=operator-self-healing logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ GIT_BRANCH=PR-713 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ VERSION=PR-713-b294b530 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:45:16 | 
operator-self-healing/7-network-loss | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++++ which gdate logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++++ which date logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ date=/usr/bin/date logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ command -v oc logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ kubectl get nodes logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | +++ grep '^minikube' logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++ get_operator_pod logger.go:42: 15:45:16 | operator-self-healing/7-network-loss | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | + network_loss ps-operator percona-server-mysql-operator-7d69f845cb-d557t operator logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | + local ns=ps-operator logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | + local pod=percona-server-mysql-operator-7d69f845cb-d557t logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | + local chaos_suffix=operator logger.go:42: 
15:45:17 | operator-self-healing/7-network-loss | + kubectl apply --namespace ps-operator -f - logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | + yq eval ' logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | .metadata.name = "chaos-pod-network-loss-operator" | logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | del(.spec.selector.pods.test-namespace) | logger.go:42: 15:45:17 | operator-self-healing/7-network-loss | .spec.selector.pods.ps-operator[0] = "percona-server-mysql-operator-7d69f845cb-d557t"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf/chaos-network-loss.yml logger.go:42: 15:45:18 | operator-self-healing/7-network-loss | networkchaos.chaos-mesh.org/chaos-pod-network-loss-operator created logger.go:42: 15:45:18 | operator-self-healing/7-network-loss | + sleep 5 logger.go:42: 15:45:23 | operator-self-healing/7-network-loss | + sleep 30 logger.go:42: 15:45:53 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:45:53 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:45:54 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:45:55 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:45:55 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:45:56 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:45:57 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:45:57 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:45:57 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:45:58 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:45:58 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:45:59 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:00 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:00 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:01 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:46:02 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:02 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:02 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:03 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:03 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:04 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:05 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:05 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:05 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:07 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:07 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:07 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:08 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:08 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:09 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:10 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:10 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:10 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:12 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:12 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:12 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:46:13 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:13 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:14 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:15 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:15 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:15 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:17 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:17 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:17 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:18 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:18 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:19 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:20 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:20 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:21 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:22 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:22 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:22 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:24 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:24 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:24 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:46:25 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:25 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:26 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:27 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:27 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:27 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:29 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:29 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:29 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:30 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:30 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:31 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:32 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:32 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:32 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:34 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:34 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:34 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:35 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:35 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:36 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:46:37 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:37 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:37 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:39 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:39 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:39 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:40 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:40 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:41 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:42 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:42 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:42 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:44 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:44 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:44 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:45 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:45 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:46 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:47 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:47 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:47 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:46:49 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:49 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:49 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:50 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:50 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:51 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:52 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:52 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:52 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found. logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | INFO Found 1 resource(s). logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | NAME NAMESPACE COL0 logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | percona-server-mysql-operator ps-operator 1 logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | ASSERT PASS logger.go:42: 15:46:54 | operator-self-healing/7-network-loss | test step completed 7-network-loss logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | starting test step 8-scale-down logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=3' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | + source ../../functions logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ realpath ../../.. 
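Step 7 repeats the pattern of step 5 with a NetworkChaos object instead of a pod kill: the operator pod keeps running but loses its network, so the Deployment briefly stops reporting a ready replica, which is what the roughly one minute of "ASSERT FAIL Resource(s) not found." lines reflects: the harness simply re-runs the same kubectl assert command until readyReplicas=1 comes back once the chaos expires. A sketch of the injection follows; the manifest fields are assumptions based on standard Chaos Mesh NetworkChaos settings, and the repository's chaos-network-loss.yml may differ in duration and loss parameters:

# Sketch: inject full packet loss on the operator pod for a fixed duration.
OPERATOR_NS=ps-operator
OPERATOR_POD=$(kubectl get pods -n "$OPERATOR_NS" \
    --selector=app.kubernetes.io/name=percona-server-mysql-operator \
    -o 'jsonpath={.items[].metadata.name}')

kubectl apply --namespace "$OPERATOR_NS" -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-pod-network-loss-operator
spec:
  action: loss
  mode: one
  selector:
    pods:
      $OPERATOR_NS:
      - $OPERATOR_POD
  loss:
    loss: "100"        # assumption: drop all traffic
    correlation: "100"
  duration: "60s"      # assumption: long enough for the readiness probe to fail
EOF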
logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++++ pwd logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++ test_name=operator-self-healing logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ GIT_BRANCH=PR-713 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ VERSION=PR-713-b294b530 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++++ which gdate logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | ++++ which date logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ date=/usr/bin/date logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ command -v oc logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ kubectl get nodes logger.go:42: 15:46:54 | operator-self-healing/8-scale-down | +++ grep '^minikube' logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + get_cr logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + local name_suffix= logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval .spec.mysql.size=3 - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval .spec.orchestrator.size=3 - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval 
.spec.orchestrator.enabled=true - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + kubectl -n kuttl-test-divine-python apply -f - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + '[' -n '' ']' logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.metadata.name="%s"' operator-self-healing logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/cr.yaml logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-713-b294b530"' - logger.go:42: 15:46:55 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:46:56 | operator-self-healing/8-scale-down | perconaservermysql.ps.percona.com/operator-self-healing configured logger.go:42: 15:47:13 | operator-self-healing/8-scale-down | test step completed 8-scale-down 
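Step 8 rebuilds the custom resource from deploy/cr.yaml with a chain of yq v4 edits and pipes the result straight into kubectl apply; the interleaved '+ yq eval ...' lines above are simply the xtrace output of that single pipeline running. A condensed sketch of the same idea follows, using the paths and values seen above (the test's get_cr helper sets additional fields, such as images and secret names, that are omitted here).

    # Condensed sketch of the step-8 apply: render the CR from deploy/cr.yaml,
    # force async replication and size=3 for mysql/haproxy/orchestrator, then apply.
    yq eval '.metadata.name="operator-self-healing"' "${DEPLOY_DIR}/cr.yaml" \
      | yq eval '.spec.mysql.clusterType="async"' - \
      | yq eval '.spec.mysql.size=3' - \
      | yq eval '.spec.proxy.haproxy.enabled=true' - \
      | yq eval '.spec.proxy.haproxy.size=3' - \
      | yq eval '.spec.orchestrator.enabled=true' - \
      | yq eval '.spec.orchestrator.size=3' - \
      | kubectl -n "${NAMESPACE}" apply -f -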
logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | starting test step 9-pod-failure logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | running command: [sh -c set -o errexit set -o xtrace source ../../functions failure_pod "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator" sleep 30 # wait for pod failure to happen] logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | + source ../../functions logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ realpath ../../.. logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++++ pwd logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++ test_name=operator-self-healing logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ GIT_BRANCH=PR-713 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ VERSION=PR-713-b294b530 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export 
IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++++ which gdate logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++++ which date logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ date=/usr/bin/date logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ command -v oc logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ kubectl get nodes logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | +++ grep '^minikube' logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++ get_operator_pod logger.go:42: 15:47:13 | operator-self-healing/9-pod-failure | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | + failure_pod ps-operator percona-server-mysql-operator-7d69f845cb-d557t operator logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | + local ns=ps-operator logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | + local pod=percona-server-mysql-operator-7d69f845cb-d557t logger.go:42: 15:47:14 | 
operator-self-healing/9-pod-failure | + local chaos_suffix=operator logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | + yq eval ' logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | .metadata.name = "chaos-pod-failure-operator" | logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | del(.spec.selector.pods.test-namespace) | logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | .spec.selector.pods.ps-operator[0] = "percona-server-mysql-operator-7d69f845cb-d557t"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf/chaos-pod-failure.yml logger.go:42: 15:47:14 | operator-self-healing/9-pod-failure | + kubectl apply --namespace ps-operator -f - logger.go:42: 15:47:15 | operator-self-healing/9-pod-failure | podchaos.chaos-mesh.org/chaos-pod-failure-operator created logger.go:42: 15:47:15 | operator-self-healing/9-pod-failure | + sleep 5 logger.go:42: 15:47:20 | operator-self-healing/9-pod-failure | + sleep 30 logger.go:42: 15:47:50 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:47:50 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:47:51 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:47:52 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:47:52 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:47:52 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:47:54 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:47:54 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:47:54 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:47:55 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:47:56 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:47:56 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:47:57 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:47:57 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:47:58 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. 
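Step 9 injects the failure by feeding conf/chaos-pod-failure.yml through yq, renaming the object to chaos-pod-failure-operator and targeting the current operator pod by name, then applying it in the ps-operator namespace; the readiness wait loop above then runs until the pod recovers. Assuming that template is a standard Chaos Mesh PodChaos object, the applied resource would look roughly like the sketch below, where the action, mode, and duration values are assumptions for illustration rather than values read from the template.

    # Illustrative PodChaos for the operator pod; only the name, namespace, and
    # pod selector are taken from the log above, the remaining fields are assumed.
    kubectl apply --namespace ps-operator -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-failure-operator
    spec:
      action: pod-failure
      mode: one
      duration: 60s
      selector:
        pods:
          ps-operator:
            - percona-server-mysql-operator-7d69f845cb-d557t
    EOF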
logger.go:42: 15:47:59 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:47:59 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:47:59 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:00 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:00 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:01 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:02 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:02 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:03 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:04 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:04 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:04 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:05 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:05 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:06 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:07 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:07 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:08 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:09 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:09 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:09 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:48:11 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:11 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:11 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:12 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:12 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:13 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:14 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:14 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:14 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:16 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:16 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:16 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:17 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:17 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:18 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:19 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:19 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:19 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:21 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:21 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:21 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:48:22 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:22 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:23 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | INFO Found 1 resource(s). logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | NAME NAMESPACE COL0 logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | percona-server-mysql-operator ps-operator 1 logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | ASSERT PASS logger.go:42: 15:48:24 | operator-self-healing/9-pod-failure | test step completed 9-pod-failure logger.go:42: 15:48:24 | operator-self-healing/10-scale-up | starting test step 10-scale-up logger.go:42: 15:48:24 | operator-self-healing/10-scale-up | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=5' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + source ../../functions logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ realpath ../../.. 
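Step 10 runs the same get_cr pipeline as step 8, with the only change being .spec.proxy.haproxy.size raised from 3 to 5, to confirm that the recovered operator still reconciles spec changes. Outside the test harness, the same one-field change could also be expressed as a targeted merge patch on the existing custom resource; this is shown only as an illustrative alternative and is not what the test does.

    # Equivalent one-field change: scale HAProxy from 3 to 5 replicas on the
    # existing custom resource instead of re-rendering deploy/cr.yaml.
    kubectl -n "${NAMESPACE}" patch perconaservermysql operator-self-healing \
      --type=merge -p '{"spec":{"proxy":{"haproxy":{"size":5}}}}'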
logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++++ pwd logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ test_name=operator-self-healing logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ GIT_BRANCH=PR-713 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ VERSION=PR-713-b294b530 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++++ which gdate logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++++ which date logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ date=/usr/bin/date logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ command -v oc logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ kubectl get nodes logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | +++ grep '^minikube' logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + get_cr logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + local name_suffix= logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval .spec.mysql.size=3 - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.size=3 - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + kubectl -n kuttl-test-divine-python apply -f - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-713-b294b530 
logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-713-b294b530"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.size=5 - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy/cr.yaml logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + '[' -n '' ']' logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 15:48:25 | operator-self-healing/10-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 15:48:26 | operator-self-healing/10-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured logger.go:42: 15:48:45 | operator-self-healing/10-scale-up | test step completed 10-scale-up logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | starting 
test step 11-destroy-chaos-mesh logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_chaos_mesh] logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | + source ../../functions logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ realpath ../../.. logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++++ pwd logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++ test_name=operator-self-healing logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ GIT_BRANCH=PR-713 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ VERSION=PR-713-b294b530 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:48:45 | 
operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++++ which gdate logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | ++++ which date logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ date=/usr/bin/date logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ command -v oc logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ kubectl get nodes logger.go:42: 15:48:45 | operator-self-healing/11-destroy-chaos-mesh | +++ grep '^minikube' logger.go:42: 15:48:46 | operator-self-healing/11-destroy-chaos-mesh | + destroy_chaos_mesh logger.go:42: 15:48:46 | operator-self-healing/11-destroy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh logger.go:42: 15:48:46 | operator-self-healing/11-destroy-chaos-mesh | ++ tail -n1 logger.go:42: 15:48:46 | operator-self-healing/11-destroy-chaos-mesh | ++ sed s/NAMESPACE// logger.go:42: 15:48:46 | 
operator-self-healing/11-destroy-chaos-mesh | ++ awk '-F ' '{print $2}' logger.go:42: 15:48:46 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:48:46 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:48:47 | operator-self-healing/11-destroy-chaos-mesh | + local chaos_mesh_ns=kuttl-test-divine-python logger.go:42: 15:48:47 | operator-self-healing/11-destroy-chaos-mesh | + '[' -n kuttl-test-divine-python ']' logger.go:42: 15:48:47 | operator-self-healing/11-destroy-chaos-mesh | + helm uninstall --wait --timeout 60s chaos-mesh --namespace kuttl-test-divine-python logger.go:42: 15:48:47 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:48:47 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-713/kubeconfig logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | release "chaos-mesh" uninstalled logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:48:56 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | ++ grep validate-auth logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl api-resources logger.go:42: 15:48:57 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:48:57 | 
operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:48:58 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:48:59 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces logger.go:42: 15:48:59 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:48:59 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:48:59 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:48:59 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:00 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces logger.go:42: 15:49:00 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:00 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:00 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:00 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:00 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 
15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:01 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:02 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete httpchaos --all --all-namespaces logger.go:42: 15:49:02 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:02 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:02 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:02 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:02 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces logger.go:42: 15:49:03 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:03 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:03 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:03 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:03 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces logger.go:42: 15:49:04 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:04 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:04 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:04 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:04 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-operator ps-operator' logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | + local kind=NetworkChaos logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-operator ps-operator' logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 15:49:05 | 
operator-self-healing/11-destroy-chaos-mesh | + local name=chaos-pod-network-loss-operator logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-operator ps-operator' logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator logger.go:42: 15:49:05 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch NetworkChaos chaos-pod-network-loss-operator -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | networkchaos.chaos-mesh.org/chaos-pod-network-loss-operator patched logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | networkchaos.chaos-mesh.org "chaos-pod-network-loss-operator" deleted logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:06 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:07 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces logger.go:42: 15:49:07 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:07 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:07 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:07 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:07 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-operator ps-operator' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-operator ps-operator' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + local 
name=chaos-pod-failure-operator logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-operator ps-operator' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator logger.go:42: 15:49:08 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-failure-operator -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-failure-operator patched logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-operator ps-operator' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-operator ps-operator' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + local name=chaos-pod-kill-operator logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-operator ps-operator' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-kill-operator -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-kill-operator patched logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:09 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces logger.go:42: 15:49:10 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-failure-operator" deleted logger.go:42: 15:49:10 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-kill-operator" deleted logger.go:42: 15:49:10 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:10 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:10 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:10 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces logger.go:42: 15:49:11 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:11 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:11 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:11 | 
operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:11 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodNetworkChaos percona-server-mysql-operator-7d69f845cb-d557t ps-operator' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + local kind=PodNetworkChaos logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodNetworkChaos percona-server-mysql-operator-7d69f845cb-d557t ps-operator' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + local name=percona-server-mysql-operator-7d69f845cb-d557t logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodNetworkChaos percona-server-mysql-operator-7d69f845cb-d557t ps-operator' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch PodNetworkChaos percona-server-mysql-operator-7d69f845cb-d557t -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | podnetworkchaos.chaos-mesh.org/percona-server-mysql-operator-7d69f845cb-d557t patched (no change) logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:12 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces logger.go:42: 15:49:13 | operator-self-healing/11-destroy-chaos-mesh | podnetworkchaos.chaos-mesh.org "percona-server-mysql-operator-7d69f845cb-d557t" deleted logger.go:42: 15:49:13 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:13 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:13 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:13 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | 
+ kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:14 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:15 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces logger.go:42: 15:49:15 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:15 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:15 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:15 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:16 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces logger.go:42: 15:49:16 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:16 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:16 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:16 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:16 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 15:49:17 | operator-self-healing/11-destroy-chaos-mesh | + read -r line logger.go:42: 15:49:18 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 
kubectl delete workflows --all --all-namespaces logger.go:42: 15:49:18 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 15:49:18 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get crd logger.go:42: 15:49:18 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh.org logger.go:42: 15:49:18 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:19 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org logger.go:42: 15:49:19 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted logger.go:42: 15:49:19 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted logger.go:42: 15:49:20 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted logger.go:42: 15:49:21 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted logger.go:42: 15:49:21 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted logger.go:42: 15:49:21 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted logger.go:42: 15:49:22 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted logger.go:42: 15:49:22 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted logger.go:42: 15:49:22 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted logger.go:42: 15:49:22 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io 
"podnetworkchaos.chaos-mesh.org" deleted logger.go:42: 15:49:22 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted logger.go:42: 15:49:24 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted logger.go:42: 15:49:24 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted logger.go:42: 15:49:24 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted logger.go:42: 15:49:25 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted logger.go:42: 15:49:26 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted logger.go:42: 15:49:27 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted logger.go:42: 15:49:31 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrolebinding logger.go:42: 15:49:31 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:49:31 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrole logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrole logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:49:32 | operator-self-healing/11-destroy-chaos-mesh | test step completed 11-destroy-chaos-mesh logger.go:42: 15:49:32 | operator-self-healing/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 15:49:33 | operator-self-healing/98-drop-finalizer | PerconaServerMySQL:kuttl-test-divine-python/operator-self-healing updated logger.go:42: 15:49:33 | operator-self-healing/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ realpath ../../.. 
logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/tests/operator-self-healing logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++ test_name=operator-self-healing logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/vars.sh logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-713 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/deploy logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-713/e2e-tests/conf logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-713 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-713 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export VERSION=PR-713-b294b530 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ VERSION=PR-713-b294b530 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-713-b294b530 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ 
IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-713/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | ++++ which date logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ command -v oc logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 15:49:33 | operator-self-healing/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely. logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 15:49:34 | operator-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 15:49:35 | operator-self-healing/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 15:49:41 | operator-self-healing/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 15:49:41 | operator-self-healing | operator-self-healing events from ns kuttl-test-divine-python: logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:03 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-divine-python/mysql-client to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:04 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:04 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:04 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-fnz88 Scheduled Successfully assigned kuttl-test-divine-python/chaos-controller-manager-c9f559c9c-fnz88 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-c9f559c9c SuccessfulCreate Created pod: chaos-controller-manager-c9f559c9c-fnz88 replicaset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-c9f559c9c SuccessfulCreate Created pod: chaos-controller-manager-c9f559c9c-v627n replicaset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal Deployment.apps chaos-controller-manager ScalingReplicaSet Scaled up replica set chaos-controller-manager-c9f559c9c to 3 deployment-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal Pod chaos-daemon-22n4w Scheduled Successfully assigned kuttl-test-divine-python/chaos-daemon-22n4w to gke-jen-ps-713-b294b530--default-pool-ac4318d2-2jr0 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal Pod chaos-daemon-sqjxh Scheduled Successfully assigned kuttl-test-divine-python/chaos-daemon-sqjxh to gke-jen-ps-713-b294b530--default-pool-ac4318d2-ts4r default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal Pod chaos-daemon-zd9qb Scheduled Successfully assigned 
kuttl-test-divine-python/chaos-daemon-zd9qb to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-sqjxh daemonset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-zd9qb daemonset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:43 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-22n4w daemonset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-9vfp6 Scheduled Successfully assigned kuttl-test-divine-python/chaos-controller-manager-c9f559c9c-9vfp6 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-ts4r default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-fnz88.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-v627n Scheduled Successfully assigned kuttl-test-divine-python/chaos-controller-manager-c9f559c9c-v627n to gke-jen-ps-713-b294b530--default-pool-ac4318d2-2jr0 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-v627n.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-c9f559c9c SuccessfulCreate Created pod: chaos-controller-manager-c9f559c9c-9vfp6 replicaset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-daemon-22n4w.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-daemon-sqjxh.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:44 +0000 UTC Normal Pod chaos-daemon-zd9qb.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:45 +0000 UTC Warning Pod chaos-controller-manager-c9f559c9c-9vfp6 FailedMount MountVolume.SetUp failed for volume "webhook-certs" : failed to sync secret cache: timed out waiting for the condition kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:46 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-9vfp6.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-fnz88.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 2.686s (2.686s including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-fnz88.spec.containers{chaos-mesh} Created Created container 
chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-fnz88.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-v627n.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 3.094s (3.094s including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-v627n.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-v627n.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal ConfigMap chaos-mesh LeaderElection chaos-controller-manager-c9f559c9c-fnz88_3e115561-1ed9-4f98-b215-9fc6c28aae4c became leader logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:47 +0000 UTC Normal Lease.coordination.k8s.io chaos-mesh LeaderElection chaos-controller-manager-c9f559c9c-fnz88_3e115561-1ed9-4f98-b215-9fc6c28aae4c became leader logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:48 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-9vfp6.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 2.659s (2.659s including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:48 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-9vfp6.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:48 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-9vfp6.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:53 +0000 UTC Normal Pod chaos-daemon-sqjxh.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 9.735s (9.735s including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-22n4w.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.201s (10.201s including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-22n4w.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-22n4w.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-sqjxh.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-sqjxh.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-zd9qb.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.489s (10.489s including waiting) 
kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-zd9qb.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:54 +0000 UTC Normal Pod chaos-daemon-zd9qb.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:59 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:59 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:59 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-divine-python/datadir-operator-self-healing-mysql-0" pd.csi.storage.gke.io_gke-343654e66a3d4ebebe89-0934-dd6b-vm_1004650e-afa8-454f-86f4-476cc16b4048 logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:59 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-0 Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql success statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:40:59 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:00 +0000 UTC Normal Pod operator-self-healing-orc-0 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-orc-0 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-2jr0 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 99ms (99ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:00 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-0 in StatefulSet operator-self-healing-orc successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:03 +0000 UTC Normal 
PersistentVolumeClaim datadir-operator-self-healing-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-7388f2fc-945d-4254-a976-358f301bf392 pd.csi.storage.gke.io_gke-343654e66a3d4ebebe89-0934-dd6b-vm_1004650e-afa8-454f-86f4-476cc16b4048 logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:03 +0000 UTC Normal Pod operator-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-mysql-0 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:07 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:07 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 92ms (92ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:08 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Created Created container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:08 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:08 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:08 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 115ms (115ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:08 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:08 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:11 +0000 UTC Normal Pod operator-self-healing-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-7388f2fc-945d-4254-a976-358f301bf392" attachdetach-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 98ms (98ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal 
Pod operator-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 82ms (82ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 92ms (92ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:18 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:19 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 110ms (110ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:19 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:19 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:40 +0000 UTC Normal Pod operator-self-healing-orc-1 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-orc-1 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-2jr0 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:40 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-1 in StatefulSet operator-self-healing-orc successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:PR-713-b294b530" in 109ms (110ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:42 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:42 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 86ms (86ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:42 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Created Created container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:42 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:42 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:43 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 129ms (129ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:43 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:43 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:52 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:52 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:52 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-divine-python/datadir-operator-self-healing-mysql-1" pd.csi.storage.gke.io_gke-343654e66a3d4ebebe89-0934-dd6b-vm_1004650e-afa8-454f-86f4-476cc16b4048 logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:52 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-1 Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql success statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:52 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:55 +0000 UTC Normal Pod operator-self-healing-haproxy-0 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-0 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:55 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:55 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-0 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:56 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-5b7cc679-eb49-4116-acbd-2f896bdf9305 pd.csi.storage.gke.io_gke-343654e66a3d4ebebe89-0934-dd6b-vm_1004650e-afa8-454f-86f4-476cc16b4048 logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:56 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 85ms (85ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:56 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:56 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:56 +0000 UTC Normal Pod operator-self-healing-mysql-1 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-mysql-1 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-ts4r default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:main-haproxy" in 80ms (80ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 86ms (86ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:57 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:58 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:58 +0000 UTC Normal Pod operator-self-healing-haproxy-1 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-1 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-2jr0 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:58 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-1 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:59 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:59 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 103ms (103ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:59 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:41:59 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:00 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:00 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 108ms (108ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:00 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Created 
Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 91ms (91ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal Pod operator-self-healing-haproxy-2 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-2 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-ts4r default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:01 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-2 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:02 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:02 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 96ms (96ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:02 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:02 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 77ms (77ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | 
operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 88ms (88ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:04 +0000 UTC Normal Pod operator-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-5b7cc679-eb49-4116-acbd-2f896bdf9305" attachdetach-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:05 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:05 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 95ms (95ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:05 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:05 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 90ms (90ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 85ms (85ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod 
operator-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 90ms (90ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:07 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:16 +0000 UTC Normal Pod operator-self-healing-orc-2 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-orc-2 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 87ms (87ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:16 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-2 in StatefulSet operator-self-healing-orc successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:17 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:18 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:18 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 79ms (79ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:18 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Created Created container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:18 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 
2024-07-31 15:42:18 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:19 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 101ms (101ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:19 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:19 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:26 +0000 UTC Warning Pod operator-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:26 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:42:29 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 105ms (105ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:00 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:00 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:00 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-divine-python/datadir-operator-self-healing-mysql-2" pd.csi.storage.gke.io_gke-343654e66a3d4ebebe89-0934-dd6b-vm_1004650e-afa8-454f-86f4-476cc16b4048 logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:00 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-2 Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql success statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:00 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:04 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-dd4047c0-b58c-48c9-b9a5-8846363eaa13 pd.csi.storage.gke.io_gke-343654e66a3d4ebebe89-0934-dd6b-vm_1004650e-afa8-454f-86f4-476cc16b4048 logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:04 +0000 UTC Normal Pod operator-self-healing-mysql-2 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-mysql-2 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-2jr0 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:12 +0000 UTC Normal Pod operator-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-dd4047c0-b58c-48c9-b9a5-8846363eaa13" attachdetach-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 102ms (102ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 88ms (88ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod 
operator-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 92ms (92ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 74ms (74ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:15 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:34 +0000 UTC Warning Pod operator-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:34 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:43:38 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 98ms (98ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:05 +0000 UTC Normal Pod operator-self-healing-haproxy-3 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-3 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:05 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:05 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 103ms (103ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:05 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Created 
Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:05 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:05 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 89ms (89ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 85ms (85ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:07 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:08 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:09 +0000 UTC Normal Pod operator-self-healing-haproxy-4 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-4 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-ts4r default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:09 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:09 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 164ms (164ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:09 +0000 UTC Normal Pod 
operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:09 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:11 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:11 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 75ms (75ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:11 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:11 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:11 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:12 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 90ms (90ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:12 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:45:12 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:47:09 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:47:09 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:47:09 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulDelete delete Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:47:10 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:47:10 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:47:10 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulDelete delete Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:37 +0000 UTC Normal Pod 
operator-self-healing-haproxy-3 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-3 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-w4g9 default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:37 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:37 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 85ms (85ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:38 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:38 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:39 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:39 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 77ms (77ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:39 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:40 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:40 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:40 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 82ms (82ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:40 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:40 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:41 +0000 UTC Normal Pod operator-self-healing-haproxy-4 Scheduled Successfully assigned kuttl-test-divine-python/operator-self-healing-haproxy-4 to gke-jen-ps-713-b294b530--default-pool-ac4318d2-ts4r default-scheduler logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:41 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-713-b294b530" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:41 +0000 UTC Normal Pod 
operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-713-b294b530" in 88ms (88ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:41 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:41 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 76ms (76ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 93ms (93ms including waiting) kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:43 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:50 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-9vfp6.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:50 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-fnz88.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:50 +0000 UTC Normal Pod chaos-controller-manager-c9f559c9c-v627n.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:50 +0000 UTC Normal Pod chaos-daemon-22n4w.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:50 +0000 UTC Normal Pod chaos-daemon-sqjxh.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:48:50 +0000 UTC Normal Pod chaos-daemon-zd9qb.spec.containers{chaos-daemon} Killing Stopping 
container chaos-daemon kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:49:41 | operator-self-healing | 
2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:34 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:35 +0000 UTC Warning Pod operator-self-healing-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/07/31 15:49:35 readiness check failed: connect to db: ping DB: dial tcp 10.175.232.26:33062: connect: connection refused kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:35 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:35 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:49:41 | operator-self-healing | 2024-07-31 15:49:37 +0000 UTC Warning Pod operator-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/07/31 15:49:37 readiness check failed: connect to db: ping DB: dial tcp 10.175.233.30:33062: connect: connection refused kubelet logger.go:42: 15:49:41 | operator-self-healing | Deleting namespace: kuttl-test-divine-python
=== NAME kuttl
harness.go:407: run tests finished
harness.go:515: cleaning up
harness.go:572: removing temp folder: ""
--- PASS: kuttl (627.87s)
--- PASS: kuttl/harness (0.00s)
--- PASS: kuttl/harness/operator-self-healing (627.43s)
PASS