=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://34.56.141.152
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/gr-self-healing
=== PAUSE kuttl/harness/gr-self-healing
=== CONT kuttl/harness/gr-self-healing
logger.go:42: 02:26:54 | gr-self-healing | Creating namespace: kuttl-test-ethical-swan
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | + source ../../functions
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ realpath ../../..
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++++ pwd
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/tests/gr-self-healing
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++ test_name=gr-self-healing
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/vars.sh
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export GIT_BRANCH=PR-764
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ GIT_BRANCH=PR-764
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export VERSION=PR-764-2039b0b5
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ VERSION=PR-764-2039b0b5
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++++ which gdate
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-764/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | ++++ which date
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ command -v oc
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 02:26:54 | gr-self-healing/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + init_temp_dir
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + rm -rf /tmp/kuttl/ps/gr-self-healing
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/gr-self-healing
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + deploy_operator
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + destroy_operator
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 02:26:55 | gr-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 02:26:56 | gr-self-healing/0-deploy-operator | namespace "ps-operator" force deleted
logger.go:42: 02:27:01 | gr-self-healing/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 02:27:01 | gr-self-healing/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 02:27:01 | gr-self-healing/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 02:27:01 | gr-self-healing/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 02:27:01 | gr-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 02:27:02 | gr-self-healing/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 02:27:02 | gr-self-healing/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 02:27:02 | gr-self-healing/0-deploy-operator | namespace/ps-operator created
logger.go:42: 02:27:02 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy/crd.yaml
logger.go:42: 02:27:03 | gr-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 02:27:03 | gr-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 02:27:04 | gr-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 02:27:04 | gr-self-healing/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 02:27:04 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy/cw-rbac.yaml
logger.go:42: 02:27:05 | gr-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 02:27:05 | gr-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 02:27:05 | gr-self-healing/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-764-2039b0b5
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 02:27:06 | gr-self-healing/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-764-2039b0b5"' /mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy/cw-operator.yaml
logger.go:42: 02:27:07 | gr-self-healing/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 02:27:07 | gr-self-healing/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
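[editor's note] The deploy_operator xtrace above reduces to one reusable pattern: rewrite the operator manifest with yq, then pipe it to kubectl. A condensed sketch, assuming (as the yq expressions above show) that deploy/cw-operator.yaml keeps the operator Deployment as its second YAML document (documentIndex==1); paths and values are taken verbatim from the log:

    # Pin the operator image and env overrides, then apply in one pipeline.
    OPERATOR_YAML=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy/cw-operator.yaml
    IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5

    yq eval "select(documentIndex==1).spec.template.spec.containers[0].image=\"${IMAGE}\"" "${OPERATOR_YAML}" \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
      | kubectl -n ps-operator apply -f -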
logger.go:42: 02:27:07 | gr-self-healing/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 02:27:07 | gr-self-healing/0-deploy-operator | + kubectl -n kuttl-test-ethical-swan apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/secrets.yaml
logger.go:42: 02:27:08 | gr-self-healing/0-deploy-operator | secret/test-secrets created
logger.go:42: 02:27:08 | gr-self-healing/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 02:27:08 | gr-self-healing/0-deploy-operator | + kubectl -n kuttl-test-ethical-swan apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 02:27:09 | gr-self-healing/0-deploy-operator | secret/test-ssl created
logger.go:42: 02:27:09 | gr-self-healing/0-deploy-operator | + deploy_client
logger.go:42: 02:27:09 | gr-self-healing/0-deploy-operator | + kubectl -n kuttl-test-ethical-swan apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/client.yaml
logger.go:42: 02:27:10 | gr-self-healing/0-deploy-operator | pod/mysql-client created
logger.go:42: 02:27:11 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 02:27:11 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 02:27:11 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 02:27:12 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 02:27:12 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 02:27:13 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 02:27:14 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 02:27:14 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 02:27:14 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 02:27:16 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 02:27:16 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 02:27:16 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 02:27:17 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 02:27:17 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 02:27:18 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | NAME NAMESPACE COL0
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | percona-server-mysql-operator ps-operator 1
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | ASSERT PASS
logger.go:42: 02:27:19 | gr-self-healing/0-deploy-operator | test step completed 0-deploy-operator
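[editor's note] The ASSERT FAIL / ASSERT PASS sequence above is kuttl re-running a TestAssert command until the operator Deployment reports a ready replica. Outside of kuttl the same wait can be scripted as a plain retry loop; a minimal sketch, assuming the kubectl-assert krew plugin seen in the log's PATH is installed (the 60 x 2s budget is an illustrative choice, not the test's):

    # Poll until the operator Deployment has one ready replica.
    for _ in $(seq 1 60); do
      if kubectl assert exist-enhanced deployment percona-server-mysql-operator \
          -n "${OPERATOR_NS:-$NAMESPACE}" --field-selector status.readyReplicas=1; then
        break
      fi
      sleep 2
    done

Plain kubectl can do the same without the plugin: kubectl -n ps-operator wait deployment/percona-server-mysql-operator --for=condition=Available --timeout=120s.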
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | starting test step 1-deploy-chaos-mesh
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_chaos_mesh]
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | + source ../../functions
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ realpath ../../..
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++++ pwd
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/tests/gr-self-healing
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++ test_name=gr-self-healing
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/vars.sh
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export GIT_BRANCH=PR-764
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ GIT_BRANCH=PR-764
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export VERSION=PR-764-2039b0b5
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ VERSION=PR-764-2039b0b5
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++++ which gdate
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-764/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | ++++ which date
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ date=/usr/bin/date
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ command -v oc
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ kubectl get nodes
logger.go:42: 02:27:19 | gr-self-healing/1-deploy-chaos-mesh | +++ grep '^minikube'
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | + deploy_chaos_mesh
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | + destroy_chaos_mesh
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ tail -n1
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '-F ' '{print $2}'
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ sed s/NAMESPACE//
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | + local chaos_mesh_ns=
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 02:27:20 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:21 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration chaos-mesh-mutation
logger.go:42: 02:27:21 | gr-self-healing/1-deploy-chaos-mesh | mutatingwebhookconfiguration.admissionregistration.k8s.io "chaos-mesh-mutation" deleted
logger.go:42: 02:27:21 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 02:27:21 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 02:27:21 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:22 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration chaos-mesh-validation chaos-mesh-validation-auth
logger.go:42: 02:27:22 | gr-self-healing/1-deploy-chaos-mesh | validatingwebhookconfiguration.admissionregistration.k8s.io "chaos-mesh-validation" deleted
logger.go:42: 02:27:22 | gr-self-healing/1-deploy-chaos-mesh | validatingwebhookconfiguration.admissionregistration.k8s.io "chaos-mesh-validation-auth" deleted
logger.go:42: 02:27:22 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 02:27:22 | gr-self-healing/1-deploy-chaos-mesh | ++ grep validate-auth
logger.go:42: 02:27:22 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:23 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 02:27:23 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 02:27:23 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 02:27:23 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl api-resources
logger.go:42: 02:27:23 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 02:27:23 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:24 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:25 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces
logger.go:42: 02:27:25 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:25 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:25 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:25 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:26 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces
logger.go:42: 02:27:26 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:26 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:26 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:26 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:26 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:27 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:28 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete httpchaos --all --all-namespaces
logger.go:42: 02:27:28 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:28 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:28 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:28 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:29 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces
logger.go:42: 02:27:29 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:29 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:29 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:29 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:29 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:30 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-picked-stag'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | + local kind=NetworkChaos
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-picked-stag'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $2}'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | + local name=chaos-pod-network-loss-primary
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-picked-stag'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $3}'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | + local namespace=kuttl-test-picked-stag
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | + kubectl patch NetworkChaos chaos-pod-network-loss-primary -n kuttl-test-picked-stag --type=merge -p '{"metadata":{"finalizers":[]}}'
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | networkchaos.chaos-mesh.org/chaos-pod-network-loss-primary patched
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:31 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:32 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-picked-stag'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + local kind=PodChaos
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-picked-stag'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $2}'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + local name=chaos-pod-failure-primary
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-picked-stag'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $3}'
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + local namespace=kuttl-test-picked-stag
logger.go:42: 02:27:33 | gr-self-healing/1-deploy-chaos-mesh | + kubectl patch PodChaos chaos-pod-failure-primary -n kuttl-test-picked-stag --type=merge -p '{"metadata":{"finalizers":[]}}'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-failure-primary patched
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-picked-stag'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + local kind=PodChaos
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-picked-stag'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $2}'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + local name=chaos-pod-kill-primary
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-picked-stag'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $3}'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + local namespace=kuttl-test-picked-stag
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + kubectl patch PodChaos chaos-pod-kill-primary -n kuttl-test-picked-stag --type=merge -p '{"metadata":{"finalizers":[]}}'
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-kill-primary patched
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:34 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces
logger.go:42: 02:27:35 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:35 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:35 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:35 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:35 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:36 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:37 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces
logger.go:42: 02:27:37 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:37 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:37 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:37 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:37 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:38 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:39 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces
logger.go:42: 02:27:39 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:39 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:39 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:39 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:40 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces
logger.go:42: 02:27:40 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:40 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:40 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:40 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:40 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces
logger.go:42: 02:27:41 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:41 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:41 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:41 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:41 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | No resources found
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | + read -r line
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete workflows --all --all-namespaces
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | No resources found
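[editor's note] The loop above walks every chaos-mesh API resource and, for any leftover object (the NetworkChaos and PodChaos entries from a previous run), clears its finalizers so the subsequent delete cannot hang on a dead controller. A condensed sketch of that pattern, generalized from the xtrace above (networkchaos shown; the same loop runs for each resource type):

    # Strip finalizers from stuck chaos objects, then bulk-delete them.
    kubectl get networkchaos --all-namespaces --no-headers \
      -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
    | while read -r kind name ns; do
        kubectl patch "$kind" "$name" -n "$ns" --type=merge -p '{"metadata":{"finalizers":[]}}'
      done
    timeout 30 kubectl delete networkchaos --all --all-namespaces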
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get crd
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh.org
logger.go:42: 02:27:42 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:43 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org
logger.go:42: 02:27:43 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted
logger.go:42: 02:27:44 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted
logger.go:42: 02:27:44 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:44 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted
logger.go:42: 02:27:44 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:44 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:44 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:45 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted
logger.go:42: 02:27:46 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted
logger.go:42: 02:27:46 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted
logger.go:42: 02:27:46 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted
logger.go:42: 02:27:46 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted
logger.go:42: 02:27:47 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted
logger.go:42: 02:27:47 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted
logger.go:42: 02:27:48 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted
logger.go:42: 02:27:49 | gr-self-healing/1-deploy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted
logger.go:42: 02:27:53 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrolebinding
logger.go:42: 02:27:53 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 02:27:53 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:54 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding chaos-mesh-chaos-controller-manager-cluster-level chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-dashboard-cluster-level chaos-mesh-chaos-dashboard-target-namespace
logger.go:42: 02:27:54 | gr-self-healing/1-deploy-chaos-mesh | clusterrolebinding.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" deleted
logger.go:42: 02:27:54 | gr-self-healing/1-deploy-chaos-mesh | clusterrolebinding.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" deleted
logger.go:42: 02:27:54 | gr-self-healing/1-deploy-chaos-mesh | clusterrolebinding.rbac.authorization.k8s.io "chaos-mesh-chaos-dashboard-cluster-level" deleted
logger.go:42: 02:27:54 | gr-self-healing/1-deploy-chaos-mesh | clusterrolebinding.rbac.authorization.k8s.io "chaos-mesh-chaos-dashboard-target-namespace" deleted
logger.go:42: 02:27:55 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrole
logger.go:42: 02:27:55 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 02:27:55 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 02:27:55 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrole chaos-mesh-chaos-controller-manager-cluster-level chaos-mesh-chaos-controller-manager-target-namespace chaos-mesh-chaos-dashboard-cluster-level chaos-mesh-chaos-dashboard-target-namespace
logger.go:42: 02:27:56 | gr-self-healing/1-deploy-chaos-mesh | clusterrole.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-cluster-level" deleted
logger.go:42: 02:27:56 | gr-self-healing/1-deploy-chaos-mesh | clusterrole.rbac.authorization.k8s.io "chaos-mesh-chaos-controller-manager-target-namespace" deleted
logger.go:42: 02:27:56 | gr-self-healing/1-deploy-chaos-mesh | clusterrole.rbac.authorization.k8s.io "chaos-mesh-chaos-dashboard-cluster-level" deleted
logger.go:42: 02:27:56 | gr-self-healing/1-deploy-chaos-mesh | clusterrole.rbac.authorization.k8s.io "chaos-mesh-chaos-dashboard-target-namespace" deleted
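[editor's note] The CRD and RBAC passes above are the same grep-and-delete sweep with the resource type swapped. A compact equivalent of the CRD pass, sketched with GNU xargs; the -r flag is an addition here, and it sidesteps the "no name was specified" error seen earlier when the grep result is empty:

    # Delete every chaos-mesh CRD in one call; skip the call entirely if none are left.
    kubectl get crd | grep chaos-mesh.org | awk '{print $1}' | xargs -r timeout 30 kubectl delete crd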
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | + helm repo add chaos-mesh https://charts.chaos-mesh.org
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | "chaos-mesh" already exists with the same configuration, skipping
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=kuttl-test-ethical-swan --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig
logger.go:42: 02:27:57 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | NAME: chaos-mesh
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | LAST DEPLOYED: Wed Oct 30 02:28:07 2024
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | NAMESPACE: kuttl-test-ethical-swan
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | STATUS: deployed
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | REVISION: 1
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | TEST SUITE: None
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | NOTES:
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | 1. Make sure chaos-mesh components are running
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | kubectl get pods --namespace kuttl-test-ethical-swan -l app.kubernetes.io/instance=chaos-mesh
logger.go:42: 02:28:18 | gr-self-healing/1-deploy-chaos-mesh | + sleep 10
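[editor's note] The chart's NOTES ask you to confirm the chaos-mesh components are running; the test settles for a fixed sleep 10. A more deterministic check with standard kubectl, using the same label selector the NOTES print (the 120s timeout is an arbitrary choice):

    # Block until every chaos-mesh pod is Ready instead of sleeping a fixed interval.
    kubectl wait pods --namespace kuttl-test-ethical-swan \
      -l app.kubernetes.io/instance=chaos-mesh \
      --for=condition=Ready --timeout=120s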
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.
Detected at:
> goroutine 25 [running]:
> runtime/debug.Stack()
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002a9c00, {0x184a055, 0x14})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002a9c00}, 0x0}, {0x184a055?, 0xc000783f80?})
> /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc00043c9a0, {0x1accd90, 0xc00069e080}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc0000ec248?, {0x0, 0xc00043c9a0, {0x1accd90, 0xc00069e080}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc0000ec248, {0x0, 0xc00043c9a0, {0x1accd90, 0xc00069e080}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc000142608, 0x47?)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc00059dc70, 0xc000176820, {0xc00012d1a0, 0x17})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc00059dc70, 0xc000176820, {0xc00012d1a0, 0x17})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc0000a8460, 0xc000176820, 0xc0001b98c0)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc000176820)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc000176820, 0xc000513e60)
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 24
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 02:28:28 | gr-self-healing/1-deploy-chaos-mesh | test step completed 1-deploy-chaos-mesh
logger.go:42: 02:28:28 | gr-self-healing/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 02:28:28 | gr-self-healing/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '.spec.mysql.clusterType="group-replication"' - \
| yq eval '.spec.mysql.size=3' - \
| yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
| yq eval '.spec.proxy.haproxy.enabled=false' - \
| yq eval '.spec.proxy.router.enabled=true' - \
| yq eval '.spec.proxy.router.size=3' - \
| yq eval '.spec.proxy.router.affinity.antiAffinityTopologyKey="none"' - \
| yq eval '.spec.orchestrator.enabled=false' - \
| kubectl -n "${NAMESPACE}" apply -f -]
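[editor's note] Step 2 reshapes the base custom resource for a three-node Group Replication cluster fronted by MySQL Router, with HAProxy and the orchestrator switched off. The transformation is exactly the yq chain quoted above; the same overrides can also be applied in a single yq expression, sketched here (get_cr is the test helper from ../../functions that prints the base PerconaServerMySQL CR):

    # One-pass equivalent of the chained yq invocations above (same fields, same values).
    get_cr | yq eval '
      .spec.mysql.clusterType = "group-replication" |
      .spec.mysql.size = 3 |
      .spec.mysql.affinity.antiAffinityTopologyKey = "none" |
      .spec.proxy.haproxy.enabled = false |
      .spec.proxy.router.enabled = true |
      .spec.proxy.router.size = 3 |
      .spec.proxy.router.affinity.antiAffinityTopologyKey = "none" |
      .spec.orchestrator.enabled = false
    ' - | kubectl -n "${NAMESPACE}" apply -f -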
logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + get_cr logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + local name_suffix= logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="group-replication"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval .spec.proxy.router.size=3 - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval .spec.proxy.router.enabled=true - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=false - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=false - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + kubectl -n kuttl-test-ethical-swan apply -f - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 02:28:29 |
gr-self-healing/2-create-cluster | ++ printf '.metadata.name="%s"' gr-self-healing logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.metadata.name="gr-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy/cr.yaml logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-764-2039b0b5"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + '[' -n '' ']' logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 02:28:29 | gr-self-healing/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 02:28:30 | gr-self-healing/2-create-cluster | perconaservermysql.ps.percona.com/gr-self-healing created logger.go:42: 02:32:12 | gr-self-healing/2-create-cluster | test step completed 2-create-cluster logger.go:42: 02:32:12 | gr-self-healing/3-write-data | starting test step 3-write-data logger.go:42: 02:32:12 | gr-self-healing/3-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ 
"-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" run_mysql \ "INSERT myDB.myTable (id) VALUES (100500)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 02:32:12 | gr-self-healing/3-write-data | + source ../../functions logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ realpath ../../.. logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++++ pwd logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/tests/gr-self-healing logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++ test_name=gr-self-healing logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/vars.sh logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export GIT_BRANCH=PR-764 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ GIT_BRANCH=PR-764 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export VERSION=PR-764-2039b0b5 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ VERSION=PR-764-2039b0b5 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator 
logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++++ which gdate logger.go:42: 02:32:12 | gr-self-healing/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-764/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:32:12 | gr-self-healing/3-write-data | ++++ which date logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ date=/usr/bin/date logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ command -v oc logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ kubectl get nodes logger.go:42: 02:32:12 | gr-self-healing/3-write-data | +++ grep '^minikube' logger.go:42: 02:32:13 | gr-self-healing/3-write-data | +++ get_cluster_name logger.go:42: 02:32:13 | gr-self-healing/3-write-data | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:32:13 | gr-self-healing/3-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 02:32:13 | gr-self-healing/3-write-data | ++ local cluster=gr-self-healing logger.go:42: 02:32:13 | gr-self-healing/3-write-data | ++ echo gr-self-healing-router logger.go:42: 02:32:13 | gr-self-healing/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:13 | gr-self-healing/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 02:32:13 | gr-self-healing/3-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:13 | gr-self-healing/3-write-data | + local pod= logger.go:42: 02:32:13 | gr-self-healing/3-write-data | ++ get_client_pod logger.go:42: 02:32:13 | 
gr-self-healing/3-write-data | ++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + client_pod=mysql-client logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + wait_pod mysql-client logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + local pod=mysql-client logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + set +o xtrace logger.go:42: 02:32:14 | gr-self-healing/3-write-data | mysql-clienttrue logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + sed -e 's/mysql: //' logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:32:14 | gr-self-healing/3-write-data | + kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + : logger.go:42: 02:32:16 | gr-self-healing/3-write-data | +++ get_cluster_name logger.go:42: 02:32:16 | gr-self-healing/3-write-data | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:32:16 | gr-self-healing/3-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 02:32:16 | gr-self-healing/3-write-data | ++ local cluster=gr-self-healing logger.go:42: 02:32:16 | gr-self-healing/3-write-data | ++ echo gr-self-healing-router logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)' logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + local pod= logger.go:42: 02:32:16 | gr-self-healing/3-write-data | ++ get_client_pod logger.go:42: 02:32:16 | gr-self-healing/3-write-data | ++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + client_pod=mysql-client logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + wait_pod mysql-client logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + local pod=mysql-client logger.go:42: 02:32:16 | gr-self-healing/3-write-data | + set +o xtrace logger.go:42: 02:32:17 | gr-self-healing/3-write-data | mysql-clienttrue logger.go:42: 02:32:17 | gr-self-healing/3-write-data | + kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:17 | gr-self-healing/3-write-data | + sed -e 's/mysql: //' logger.go:42: 02:32:17 | gr-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' 
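The trace above shows the run_mysql pattern this suite uses everywhere: the statement is piped into mysql -sN inside the long-running mysql-client pod, targeting the router's read-write port 6446, with the password warning filtered out afterwards. A minimal standalone sketch of the same helper (names follow the trace; the trailing || true is an addition so errexit survives statements that produce no output, which is what the bare "+ :" lines in the log represent):

#!/bin/bash
set -o errexit

NAMESPACE=kuttl-test-ethical-swan

# Run one SQL statement inside the mysql-client pod, as in the trace.
run_mysql() {
    local command="$1"   # SQL to execute
    local uri="$2"       # e.g. "-h gr-self-healing-router -P 6446 -uroot -proot_password"
    kubectl -n "${NAMESPACE}" exec mysql-client -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.' || true
}

# Usage, mirroring step 3 of the log:
run_mysql "INSERT myDB.myTable (id) VALUES (100500)" \
    "-h gr-self-healing-router -P 6446 -uroot -proot_password"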
logger.go:42: 02:32:18 | gr-self-healing/3-write-data | + : logger.go:42: 02:32:18 | gr-self-healing/3-write-data | + sleep 5 logger.go:42: 02:32:23 | gr-self-healing/3-write-data | test step completed 3-write-data logger.go:42: 02:32:23 | gr-self-healing/4-read-from-primary | starting test step 4-read-from-primary logger.go:42: 02:32:23 | gr-self-healing/4-read-from-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"] logger.go:42: 02:32:23 | gr-self-healing/4-read-from-primary | + source ../../functions logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++++ get_cluster_name logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 02:32:24
| gr-self-healing/4-read-from-primary | +++ local cluster=gr-self-healing logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | +++ echo gr-self-healing-router logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ local pod= logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | +++ get_client_pod logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ client_pod=mysql-client logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ wait_pod mysql-client logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ local pod=mysql-client logger.go:42: 02:32:24 | gr-self-healing/4-read-from-primary | ++ set +o xtrace logger.go:42: 02:32:25 | gr-self-healing/4-read-from-primary | mysql-clienttrue logger.go:42: 02:32:25 | gr-self-healing/4-read-from-primary | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:25 | gr-self-healing/4-read-from-primary | ++ sed -e 's/mysql: //' logger.go:42: 02:32:25 | gr-self-healing/4-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:32:26 | gr-self-healing/4-read-from-primary | + data=100500 logger.go:42: 02:32:26 | gr-self-healing/4-read-from-primary | + kubectl create configmap -n kuttl-test-ethical-swan 04-read-from-primary --from-literal=data=100500 logger.go:42: 02:32:27 | gr-self-healing/4-read-from-primary | configmap/04-read-from-primary created logger.go:42: 02:32:27 | gr-self-healing/4-read-from-primary | test step completed 4-read-from-primary logger.go:42: 02:32:27 | gr-self-healing/5-kill-primary | starting test step 5-kill-primary logger.go:42: 02:32:27 | gr-self-healing/5-kill-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_pod="$(get_primary_from_group_replication)" kill_pods "${NAMESPACE}" "pod" "$init_pod" "" "primary" sleep 10 # wait a bit for pod to be killed if [ "$init_pod" == "$(get_primary_from_group_replication)" ]; then echo "primary pod was not killed! something went wrong." exit 1 fi] logger.go:42: 02:32:27 | gr-self-healing/5-kill-primary | + source ../../functions logger.go:42: 02:32:27 | gr-self-healing/5-kill-primary | +++ realpath ../../.. 
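Step 5 first records which pod is the group-replication primary, then kills it. As the trace below shows, get_primary_from_group_replication reduces to a single query against performance_schema, sent through port 6446 (which MySQL Router keeps pointed at the primary), with cut trimming the returned FQDN down to a pod name. A sketch reusing the run_mysql helper sketched earlier:

# Ask the group which member is currently PRIMARY; any member can answer,
# here the router's read-write port is used as in the trace.
get_primary_from_group_replication() {
    # MEMBER_HOST comes back as an FQDN like
    # gr-self-healing-mysql-0.gr-self-healing-mysql; cut keeps the pod name.
    run_mysql "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='PRIMARY';" \
        "-h gr-self-healing-router -P 6446 -uroot -proot_password" \
        | cut -d. -f1
}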
logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ get_primary_from_group_replication logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ cut -d.
-f1 logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++++ get_cluster_name logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | +++ local cluster=gr-self-healing logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | +++ echo gr-self-healing-router logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ local pod= logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | +++ get_client_pod logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ client_pod=mysql-client logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ wait_pod mysql-client logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ local pod=mysql-client logger.go:42: 02:32:28 | gr-self-healing/5-kill-primary | ++ set +o xtrace logger.go:42: 02:32:29 | gr-self-healing/5-kill-primary | mysql-clienttrue logger.go:42: 02:32:29 | gr-self-healing/5-kill-primary | ++ sed -e 's/mysql: //' logger.go:42: 02:32:29 | gr-self-healing/5-kill-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' 
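kill_pods, traced below, does not simply kubectl delete the pod; it rewrites e2e-tests/conf/chaos-pod-kill.yml with yq so the PodChaos selector targets the current primary, then applies it. The log shows only the yq rewrites, so the full manifest below is a sketch: metadata.name and the selector.pods path come from the trace, while apiVersion, action, and mode are assumptions based on chaos-mesh's v1alpha1 PodChaos API, not on this log.

# Roughly what kubectl receives after the yq rewrites in the trace.
kubectl apply --namespace kuttl-test-ethical-swan -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1   # assumed chaos-mesh API group/version
kind: PodChaos
metadata:
  name: chaos-pod-kill-primary
spec:
  action: pod-kill   # assumed from the PodChaos API; the experiment kills the pod once
  mode: one
  selector:
    pods:
      kuttl-test-ethical-swan:
        - gr-self-healing-mysql-0   # the primary detected above
EOF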
logger.go:42: 02:32:29 | gr-self-healing/5-kill-primary | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + init_pod=gr-self-healing-mysql-0 logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + kill_pods kuttl-test-ethical-swan pod gr-self-healing-mysql-0 '' primary logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + local ns=kuttl-test-ethical-swan logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + local selector=pod logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + local pod_label=gr-self-healing-mysql-0 logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + local label_value= logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + local chaos_suffix=primary logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + '[' pod == pod ']' logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + kubectl apply --namespace kuttl-test-ethical-swan -f - logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | + yq eval ' logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | .metadata.name = "chaos-pod-kill-primary" | logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | del(.spec.selector.pods.test-namespace) | logger.go:42: 02:32:30 | gr-self-healing/5-kill-primary | .spec.selector.pods.kuttl-test-ethical-swan[0] = "gr-self-healing-mysql-0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/chaos-pod-kill.yml logger.go:42: 02:32:31 | gr-self-healing/5-kill-primary | podchaos.chaos-mesh.org/chaos-pod-kill-primary created logger.go:42: 02:32:31 | gr-self-healing/5-kill-primary | + sleep 5 logger.go:42: 02:32:36 | gr-self-healing/5-kill-primary | + sleep 10 logger.go:42: 02:32:46 | gr-self-healing/5-kill-primary | ++ get_primary_from_group_replication logger.go:42: 02:32:46 | gr-self-healing/5-kill-primary | ++ cut -d. 
-f1 logger.go:42: 02:32:46 | gr-self-healing/5-kill-primary | ++++ get_cluster_name logger.go:42: 02:32:46 | gr-self-healing/5-kill-primary | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | +++ local cluster=gr-self-healing logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | +++ echo gr-self-healing-router logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ local pod= logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | +++ get_client_pod logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ client_pod=mysql-client logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ wait_pod mysql-client logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ local pod=mysql-client logger.go:42: 02:32:47 | gr-self-healing/5-kill-primary | ++ set +o xtrace logger.go:42: 02:32:48 | gr-self-healing/5-kill-primary | mysql-clienttrue logger.go:42: 02:32:48 | gr-self-healing/5-kill-primary | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:32:48 | gr-self-healing/5-kill-primary | ++ sed -e 's/mysql: //' logger.go:42: 02:32:48 | gr-self-healing/5-kill-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:32:49 | gr-self-healing/5-kill-primary | + '[' gr-self-healing-mysql-0 == gr-self-healing-mysql-1 ']' logger.go:42: 02:33:51 | gr-self-healing/5-kill-primary | test step completed 5-kill-primary logger.go:42: 02:33:51 | gr-self-healing/6-write-data | starting test step 6-write-data logger.go:42: 02:33:51 | gr-self-healing/6-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100501)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 02:33:51 | gr-self-healing/6-write-data | + source ../../functions logger.go:42: 02:33:51 | gr-self-healing/6-write-data | +++ realpath ../../.. 
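The step above passes because the string comparison fails: the primary moved from gr-self-healing-mysql-0 to gr-self-healing-mysql-1, meaning chaos-mesh killed the right pod and the group elected a new primary on its own. Condensed, the whole self-healing check from the step definition reads as follows (reusing the helpers sketched earlier; kill_pods is the suite's own function traced above):

# Record the primary, kill it via chaos-mesh, then require a new one.
init_pod="$(get_primary_from_group_replication)"
kill_pods "${NAMESPACE}" "pod" "${init_pod}" "" "primary"
sleep 10   # wait a bit for pod to be killed
if [ "${init_pod}" == "$(get_primary_from_group_replication)" ]; then
    echo "primary pod was not killed! something went wrong."
    exit 1
fi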
logger.go:42: 02:33:51 | gr-self-healing/6-write-data | +++ get_cluster_name logger.go:42: 02:33:51 | gr-self-healing/6-write-data | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 02:33:52 | gr-self-healing/6-write-data | ++ local cluster=gr-self-healing logger.go:42: 02:33:52 | gr-self-healing/6-write-data | ++ echo gr-self-healing-router logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100501)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100501)' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + local pod= logger.go:42: 02:33:52 | gr-self-healing/6-write-data | ++ get_client_pod logger.go:42: 02:33:52 | gr-self-healing/6-write-data | ++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + client_pod=mysql-client logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + wait_pod mysql-client logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + local pod=mysql-client logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + set +o xtrace logger.go:42: 02:33:52 | gr-self-healing/6-write-data
| mysql-clienttrue logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100501)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + sed -e 's/mysql: //' logger.go:42: 02:33:52 | gr-self-healing/6-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:33:54 | gr-self-healing/6-write-data | + : logger.go:42: 02:33:54 | gr-self-healing/6-write-data | + sleep 5 logger.go:42: 02:33:59 | gr-self-healing/6-write-data | test step completed 6-write-data logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | starting test step 7-read-from-replicas logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 07-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | + source ../../functions logger.go:42: 02:33:59 |
gr-self-healing/7-read-from-replicas | +++ command -v oc logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | +++ kubectl get nodes logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | +++ grep '^minikube' logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | +++ get_cluster_name logger.go:42: 02:33:59 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ local index=0 logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ local pod= logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | +++ get_client_pod logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ set +o xtrace logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | mysql-clienttrue logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
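The get_mysql_headless_fqdn helper traced above can be reconstructed directly from its xtrace output: it derives the per-pod DNS name from the cluster name and the StatefulSet ordinal, relying on the headless Service the operator creates for the MySQL pods. A minimal sketch (the canonical definition lives in e2e-tests/functions, which this log only sources):

    # Reconstructed from the xtrace above; the real definition is in e2e-tests/functions.
    get_mysql_headless_fqdn() {
        local cluster=$1
        local index=$2
        # <pod>.<headless-service>, resolvable inside the cluster network
        echo "${cluster}-mysql-${index}.${cluster}-mysql"
    }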
logger.go:42: 02:34:00 | gr-self-healing/7-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | + data='100500 logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 07-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | configmap/07-read-from-replicas-0 created logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | +++ get_cluster_name logger.go:42: 02:34:02 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ local index=1 logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ local pod= logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | +++ get_client_pod logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ set +o xtrace logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | mysql-clienttrue logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:34:03 | gr-self-healing/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
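Every SQL statement in this test flows through run_mysql, whose shape the trace makes explicit: look up the long-running mysql-client pod by label, wait for it, pipe the statement into mysql -sN via kubectl exec, and scrub the client's password warning from the output. A sketch assembled from the xtrace (wait_pod is only ever called, never expanded, in this log, so its body is omitted; NAMESPACE is the kuttl-provided test namespace, kuttl-test-ethical-swan in this run):

    run_mysql() {
        local command="$1"
        local uri="$2"
        local client_pod
        client_pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client \
            -o 'jsonpath={.items[].metadata.name}')
        wait_pod "${client_pod}"   # blocks until the client pod is usable (body not shown in the log)
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }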
logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | + data='100500 logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 07-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | configmap/07-read-from-replicas-1 created logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | +++ get_cluster_name logger.go:42: 02:34:05 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ local index=2 logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ local pod= logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | +++ get_client_pod logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:34:06 | gr-self-healing/7-read-from-replicas | ++ set +o xtrace logger.go:42: 02:34:07 | gr-self-healing/7-read-from-replicas | mysql-clienttrue logger.go:42: 02:34:07 | gr-self-healing/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:34:07 | gr-self-healing/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
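The query results are persisted as ConfigMaps so a declarative kuttl assert file (part of the test step on disk, not reproduced in this log) can match them against the expected rows. --from-literal preserves the embedded newlines, which is why the quoted value spans several log entries above. The pattern, per replica:

    # One ConfigMap per replica; the step's assert file is expected to match
    # .data.data against the rows every group member should have applied.
    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password")
    kubectl create configmap -n "${NAMESPACE}" "07-read-from-replicas-${i}" \
        --from-literal=data="${data}"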
logger.go:42: 02:34:07 | gr-self-healing/7-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:34:08 | gr-self-healing/7-read-from-replicas | + data='100500 logger.go:42: 02:34:08 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 02:34:08 | gr-self-healing/7-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 07-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 02:34:08 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 02:34:08 | gr-self-healing/7-read-from-replicas | configmap/07-read-from-replicas-2 created logger.go:42: 02:34:09 | gr-self-healing/7-read-from-replicas | test step completed 7-read-from-replicas logger.go:42: 02:34:09 | gr-self-healing/8-failure-primary | starting test step 8-failure-primary logger.go:42: 02:34:09 | gr-self-healing/8-failure-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions failure_pod "${NAMESPACE}" "$(get_primary_from_group_replication)" "primary" sleep 10 # wait a bit for pod to be killed] logger.go:42: 02:34:09 | gr-self-healing/8-failure-primary | + source ../../functions
logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++ get_primary_from_group_replication logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++ cut -d. -f1 logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++++ get_cluster_name logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | +++ local cluster=gr-self-healing logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | +++ echo gr-self-healing-router logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | ++ local pod= logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | +++ get_client_pod logger.go:42: 02:34:10 | gr-self-healing/8-failure-primary | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ client_pod=mysql-client logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ wait_pod mysql-client logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ local pod=mysql-client logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ set +o xtrace logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | mysql-clienttrue logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ sed -e 's/mysql: //' logger.go:42: 02:34:11 | gr-self-healing/8-failure-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
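The primary lookup running here asks Group Replication itself rather than the operator: get_primary_from_group_replication queries performance_schema.replication_group_members through the router on 6446 (the port this test uses for read-write traffic) and trims the returned FQDN down to the bare pod name. Reconstructed from the trace:

    # Returns the pod name of the current GR primary, e.g. gr-self-healing-mysql-1.
    get_primary_from_group_replication() {
        run_mysql \
            "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='PRIMARY';" \
            "-h $(get_mysql_router_service "$(get_cluster_name)") -P 6446 -uroot -proot_password" \
            | cut -d. -f1
    }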
logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | + failure_pod kuttl-test-ethical-swan gr-self-healing-mysql-1 primary logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | + local ns=kuttl-test-ethical-swan logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | + local pod=gr-self-healing-mysql-1 logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | + local chaos_suffix=primary logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | + yq eval ' logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | .metadata.name = "chaos-pod-failure-primary" | logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | del(.spec.selector.pods.test-namespace) | logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | .spec.selector.pods.kuttl-test-ethical-swan[0] = "gr-self-healing-mysql-1"' /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/chaos-pod-failure.yml logger.go:42: 02:34:12 | gr-self-healing/8-failure-primary | + kubectl apply --namespace kuttl-test-ethical-swan -f - logger.go:42: 02:34:14 | gr-self-healing/8-failure-primary | podchaos.chaos-mesh.org/chaos-pod-failure-primary created logger.go:42: 02:34:14 | gr-self-healing/8-failure-primary | + sleep 5 logger.go:42: 02:34:19 | gr-self-healing/8-failure-primary | + sleep 10 logger.go:42: 02:36:27 | gr-self-healing/8-failure-primary | test step completed 8-failure-primary logger.go:42: 02:36:27 | gr-self-healing/9-write-data | starting test step 9-write-data logger.go:42: 02:36:27 | gr-self-healing/9-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100502)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 02:36:27 | gr-self-healing/9-write-data | + source ../../functions
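failure_pod, traced in the step above, renders a Chaos Mesh PodChaos template with yq: it names the object after the chaos suffix, deletes the template's placeholder test-namespace key from the pod selector, and targets the real namespace and pod before applying the result. The sketch below mirrors the traced yq program; chaos-pod-failure.yml itself (including its action and duration fields) lives in e2e-tests/conf and is never printed in this log, so nothing beyond the selector layout is assumed here:

    failure_pod() {
        local ns=$1
        local pod=$2
        local chaos_suffix=$3
        # chaos-pod-failure.yml is a chaos-mesh.org PodChaos manifest shipped in
        # e2e-tests/conf; only the selector edits below are visible in this log.
        yq eval '
            .metadata.name = "chaos-pod-failure-'"${chaos_suffix}"'" |
            del(.spec.selector.pods.test-namespace) |
            .spec.selector.pods.'"${ns}"'[0] = "'"${pod}"'"' \
            "${TESTS_CONFIG_DIR}/chaos-pod-failure.yml" \
            | kubectl apply --namespace "${ns}" -f -
    }

The two-minute gap between sleep 10 finishing (02:34:19) and the step completing (02:36:27) is presumably kuttl polling the step's assert until the cluster reports healthy again after the primary was killed.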
logger.go:42: 02:36:28 | gr-self-healing/9-write-data | +++ get_cluster_name logger.go:42: 02:36:28 | gr-self-healing/9-write-data | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:36:28 | gr-self-healing/9-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 02:36:28 | gr-self-healing/9-write-data | ++ local cluster=gr-self-healing logger.go:42: 02:36:28 | gr-self-healing/9-write-data | ++ echo gr-self-healing-router logger.go:42: 02:36:28 | gr-self-healing/9-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100502)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:36:28 | gr-self-healing/9-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100502)' logger.go:42: 02:36:28 | gr-self-healing/9-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:36:28 | gr-self-healing/9-write-data | + local pod= logger.go:42: 02:36:28 | gr-self-healing/9-write-data | ++ get_client_pod logger.go:42: 02:36:28 | gr-self-healing/9-write-data | ++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + client_pod=mysql-client logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + wait_pod mysql-client logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + local pod=mysql-client logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + set +o xtrace logger.go:42: 02:36:29 | gr-self-healing/9-write-data
| mysql-clienttrue logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100502)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + sed -e 's/mysql: //' logger.go:42: 02:36:29 | gr-self-healing/9-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:36:30 | gr-self-healing/9-write-data | + : logger.go:42: 02:36:30 | gr-self-healing/9-write-data | + sleep 5 logger.go:42: 02:36:35 | gr-self-healing/9-write-data | test step completed 9-write-data logger.go:42: 02:36:35 | gr-self-healing/10-read-from-replicas | starting test step 10-read-from-replicas logger.go:42: 02:36:35 | gr-self-healing/10-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 10-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 02:36:35 | gr-self-healing/10-read-from-replicas | + source ../../functions
logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | +++ get_cluster_name logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ local index=0 logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | ++ local pod= logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | +++ get_client_pod logger.go:42: 02:36:36 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ set +o xtrace logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | mysql-clienttrue logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:36:37 | gr-self-healing/10-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.'
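Steps 9 and 10 mirror steps 6 and 7 and confirm that the row written after the pod failure (100502) reaches all three members. For a manual check of the same condition, the traced plumbing can also report group membership state directly; this query is not part of the test, just an ad-hoc use of the same helpers:

    # Every member should report ONLINE once the group has healed.
    run_mysql \
        "SELECT MEMBER_HOST, MEMBER_STATE FROM performance_schema.replication_group_members;" \
        "-h $(get_mysql_router_service "$(get_cluster_name)") -P 6446 -uroot -proot_password"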
logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | + data='100500 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 10-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | configmap/10-read-from-replicas-0 created logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | +++ get_cluster_name logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ local index=1 logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | ++ local pod= logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | +++ get_client_pod logger.go:42: 02:36:39 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ set +o xtrace logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | mysql-clienttrue logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 02:36:40 | gr-self-healing/10-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | + data='100500 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 10-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | configmap/10-read-from-replicas-1 created logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | +++ get_cluster_name logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ local index=2 logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | ++ local pod= logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | +++ get_client_pod logger.go:42: 02:36:42 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ set +o xtrace logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | mysql-clienttrue logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 02:36:43 | gr-self-healing/10-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | + data='100500 logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 10-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 02:36:45 | gr-self-healing/10-read-from-replicas | configmap/10-read-from-replicas-2 created logger.go:42: 02:36:46 | gr-self-healing/10-read-from-replicas | test step completed 10-read-from-replicas logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | starting test step 11-network-loss-primary logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions network_loss "${NAMESPACE}" "$(get_primary_from_group_replication)" "primary" sleep 30 # wait for new master to get elected] logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | + source ../../functions
logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | ++ get_primary_from_group_replication logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | ++ cut -d. -f1 logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | ++++ get_cluster_name logger.go:42: 02:36:46 | gr-self-healing/11-network-loss-primary | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | +++ local cluster=gr-self-healing logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | +++ echo gr-self-healing-router logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ local pod= logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | +++ get_client_pod logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ client_pod=mysql-client logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ wait_pod mysql-client logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ local pod=mysql-client logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ set +o xtrace logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | mysql-clienttrue logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ sed -e 's/mysql: //' logger.go:42: 02:36:47 | gr-self-healing/11-network-loss-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | + network_loss kuttl-test-ethical-swan gr-self-healing-mysql-0 primary logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | + local ns=kuttl-test-ethical-swan logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | + local pod=gr-self-healing-mysql-0 logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | + local chaos_suffix=primary logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | + yq eval ' logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | .metadata.name = "chaos-pod-network-loss-primary" | logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | del(.spec.selector.pods.test-namespace) | logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | .spec.selector.pods.kuttl-test-ethical-swan[0] = "gr-self-healing-mysql-0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/chaos-network-loss.yml logger.go:42: 02:36:49 | gr-self-healing/11-network-loss-primary | + kubectl apply --namespace kuttl-test-ethical-swan -f - logger.go:42: 02:36:50 | gr-self-healing/11-network-loss-primary | networkchaos.chaos-mesh.org/chaos-pod-network-loss-primary created logger.go:42: 02:36:50 | gr-self-healing/11-network-loss-primary | + sleep 5 logger.go:42: 02:36:55 | gr-self-healing/11-network-loss-primary | + sleep 30 logger.go:42: 02:37:50 | gr-self-healing/11-network-loss-primary | test step completed 11-network-loss-primary logger.go:42: 02:37:50 | gr-self-healing/12-write-data | starting test step 12-write-data logger.go:42: 02:37:50 | gr-self-healing/12-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100503)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 02:37:50 | gr-self-healing/12-write-data | + source ../../functions
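network_loss, traced above, follows the same render-and-apply pattern as failure_pod but against chaos-network-loss.yml, producing a NetworkChaos object that cuts off the current primary (gr-self-healing-mysql-0 this time) at the network level; the step's sleep 30 then gives Group Replication time to expel the unreachable member and elect a new primary. A sketch mirroring the traced yq edits (the loss percentage, direction, and duration live in the template, which this log does not show):

    network_loss() {
        local ns=$1
        local pod=$2
        local chaos_suffix=$3
        # chaos-network-loss.yml is a chaos-mesh.org NetworkChaos manifest in
        # e2e-tests/conf; its action/loss/duration fields are not visible here.
        yq eval '
            .metadata.name = "chaos-pod-network-loss-'"${chaos_suffix}"'" |
            del(.spec.selector.pods.test-namespace) |
            .spec.selector.pods.'"${ns}"'[0] = "'"${pod}"'"' \
            "${TESTS_CONFIG_DIR}/chaos-network-loss.yml" \
            | kubectl apply --namespace "${ns}" -f -
    }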
logger.go:42: 02:37:51 | gr-self-healing/12-write-data | +++ get_cluster_name logger.go:42: 02:37:51 | gr-self-healing/12-write-data | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:37:51 | gr-self-healing/12-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 02:37:51 | gr-self-healing/12-write-data | ++ local cluster=gr-self-healing logger.go:42: 02:37:51 | gr-self-healing/12-write-data | ++ echo gr-self-healing-router logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100503)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100503)' logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + local pod= logger.go:42: 02:37:51 | gr-self-healing/12-write-data | ++ get_client_pod logger.go:42: 02:37:51 | gr-self-healing/12-write-data | ++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + client_pod=mysql-client logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + wait_pod mysql-client logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + local pod=mysql-client logger.go:42: 02:37:51 |
logger.go:42: 02:37:51 | gr-self-healing/12-write-data | + set +o xtrace logger.go:42: 02:37:52 | gr-self-healing/12-write-data | mysql-clienttrue logger.go:42: 02:37:52 | gr-self-healing/12-write-data | + sed -e 's/mysql: //' logger.go:42: 02:37:52 | gr-self-healing/12-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:37:52 | gr-self-healing/12-write-data | + kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100503)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:37:53 | gr-self-healing/12-write-data | + : logger.go:42: 02:37:53 | gr-self-healing/12-write-data | + sleep 5 logger.go:42: 02:37:58 | gr-self-healing/12-write-data | test step completed 12-write-data
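All run_mysql calls in this log follow the pattern visible in the trace above. A minimal sketch of the helper, reconstructed from the xtrace (NAMESPACE and the name=mysql-client pod label come from this log; error handling in the suite's real functions file may differ):

    run_mysql() {
        local command="$1" # SQL statement to execute
        local uri="$2"     # client flags, e.g. "-h gr-self-healing-router -P 6446 -uroot -p..."
        local client_pod
        # Find the long-running client pod deployed in step 0.
        client_pod=$(kubectl -n "${NAMESPACE}" get pods \
            --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
        # Pipe the statement into the mysql client inside that pod and strip
        # the password-on-command-line warning from the output.
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }

Used exactly as in this step: run_mysql "INSERT myDB.myTable (id) VALUES (100503)" "-h gr-self-healing-router -P 6446 -uroot -proot_password".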
logger.go:42: 02:37:58 | gr-self-healing/13-read-from-replicas | starting test step 13-read-from-replicas logger.go:42: 02:37:58 | gr-self-healing/13-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 13-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 02:37:58 | gr-self-healing/13-read-from-replicas | + source ../../functions logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | +++ get_cluster_name logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ local index=0 logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | ++ local pod= logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | +++ get_client_pod logger.go:42: 02:37:59 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ set +o xtrace logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | mysql-clienttrue logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:38:00 | gr-self-healing/13-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | + data='100500 logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 13-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 02:38:01 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | configmap/13-read-from-replicas-0 created logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | +++ get_cluster_name logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ local index=1 logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | ++ local pod= logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | +++ get_client_pod logger.go:42: 02:38:02 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ set +o xtrace logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | mysql-clienttrue logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:38:03 | gr-self-healing/13-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | + data='100500 logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 13-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 02:38:04 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | configmap/13-read-from-replicas-1 created logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | +++ get_cluster_name logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ local index=2 logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | ++ local pod= logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | +++ get_client_pod logger.go:42: 02:38:05 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ set +o xtrace logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | mysql-clienttrue logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 02:38:06 | gr-self-healing/13-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | + data='100500 logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 13-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 02:38:07 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 02:38:08 | gr-self-healing/13-read-from-replicas | configmap/13-read-from-replicas-2 created logger.go:42: 02:38:32 | gr-self-healing/13-read-from-replicas | test step completed 13-read-from-replicas
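The loop that produced the three ConfigMaps above is worth seeing in one piece; this is the step's own command reflowed (get_cluster_name, get_mysql_headless_fqdn and run_mysql come from the suite's functions file, and NAMESPACE is injected by kuttl):

    for i in 0 1 2; do
        # Query each group member directly over its headless-service FQDN,
        # bypassing the router, so every member's local copy is checked.
        host=$(get_mysql_headless_fqdn "$(get_cluster_name)" "$i")
        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password")
        # Record the rows so a kuttl assert can compare the ConfigMaps and
        # confirm all members returned the same data set.
        kubectl create configmap -n "${NAMESPACE}" "13-read-from-replicas-${i}" \
            --from-literal=data="${data}"
    done

The row 100503 written through the router in step 12 shows up on all three members, confirming the group replicated the write after the primary's network loss was healed.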
logger.go:42: 02:38:32 | gr-self-healing/14-cluster-crash | starting test step 14-cluster-crash logger.go:42: 02:38:32 | gr-self-healing/14-cluster-crash | running command: [sh -c set -o errexit set -o xtrace source ../../functions kill_pods "${NAMESPACE}" "label" "app.kubernetes.io/instance" "gr-self-healing" "cluster-crash" sleep 30 # wait for crash] logger.go:42: 02:38:32 | gr-self-healing/14-cluster-crash | + source ../../functions logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + kill_pods kuttl-test-ethical-swan label app.kubernetes.io/instance gr-self-healing cluster-crash logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + local ns=kuttl-test-ethical-swan logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + local selector=label logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + local pod_label=app.kubernetes.io/instance logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + local label_value=gr-self-healing logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + local chaos_suffix=cluster-crash logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + '[' label == pod ']' logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + '[' label == label ']' logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + yq eval ' logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | .metadata.name = "chaos-kill-label-cluster-crash" | logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | .spec.mode = "all" | logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | del(.spec.selector.pods) | logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | .spec.selector.labelSelectors."app.kubernetes.io/instance" = "gr-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf/chaos-pod-kill.yml logger.go:42: 02:38:33 | gr-self-healing/14-cluster-crash | + kubectl apply --namespace kuttl-test-ethical-swan -f - logger.go:42: 02:38:34 | gr-self-healing/14-cluster-crash | podchaos.chaos-mesh.org/chaos-kill-label-cluster-crash created logger.go:42: 02:38:34 | gr-self-healing/14-cluster-crash | + sleep 5 logger.go:42: 02:38:39 | gr-self-healing/14-cluster-crash | + sleep 30 logger.go:42: 02:43:18 | gr-self-healing/14-cluster-crash | test step completed 14-cluster-crash
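The kill_pods call above ran in its "label" mode. Reconstructed from the xtrace, that branch amounts to the following sketch (the function name kill_pods_by_label is hypothetical, a standalone variant of the suite's kill_pods; chaos-pod-kill.yml is the suite's base PodChaos manifest, and mode "all" makes chaos-mesh kill every matching pod at once instead of sampling one):

    kill_pods_by_label() {
        local ns="$1" pod_label="$2" label_value="$3" chaos_suffix="$4"
        # Rewrite the base PodChaos: name it, switch to mode=all, drop the
        # per-pod selector, and match on the given label instead.
        yq eval "
            .metadata.name = \"chaos-kill-label-${chaos_suffix}\" |
            .spec.mode = \"all\" |
            del(.spec.selector.pods) |
            .spec.selector.labelSelectors.\"${pod_label}\" = \"${label_value}\"" \
            "${TESTS_CONFIG_DIR}/chaos-pod-kill.yml" \
            | kubectl apply --namespace "${ns}" -f -
    }

Because app.kubernetes.io/instance=gr-self-healing matches the MySQL, router, and any other cluster pods, this wipes out the whole cluster in one shot; the roughly five-minute gap between "sleep 30" finishing and "test step completed" at 02:43:18 is the operator rebuilding the group while kuttl waits on its assertions.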
logger.go:42: 02:43:18 | gr-self-healing/15-write-data | starting test step 15-write-data logger.go:42: 02:43:18 | gr-self-healing/15-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100504)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 02:43:18 | gr-self-healing/15-write-data | + source ../../functions logger.go:42: 02:43:19 | gr-self-healing/15-write-data | +++ get_cluster_name logger.go:42: 02:43:19 | gr-self-healing/15-write-data | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:43:19 | gr-self-healing/15-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 02:43:19 | gr-self-healing/15-write-data | ++ local cluster=gr-self-healing logger.go:42: 02:43:19 | gr-self-healing/15-write-data | ++ echo gr-self-healing-router logger.go:42: 02:43:19 | gr-self-healing/15-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100504)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:43:19 | gr-self-healing/15-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100504)' logger.go:42: 02:43:19 | gr-self-healing/15-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:43:19 | gr-self-healing/15-write-data | + local pod= logger.go:42: 02:43:19 | gr-self-healing/15-write-data | ++ get_client_pod logger.go:42: 02:43:19 | gr-self-healing/15-write-data | ++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + client_pod=mysql-client logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + wait_pod mysql-client logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + local pod=mysql-client
logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + set +o xtrace logger.go:42: 02:43:20 | gr-self-healing/15-write-data | mysql-clienttrue logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + sed -e 's/mysql: //' logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 02:43:20 | gr-self-healing/15-write-data | + kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100504)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 02:43:22 | gr-self-healing/15-write-data | + : logger.go:42: 02:43:22 | gr-self-healing/15-write-data | + sleep 5 logger.go:42: 02:43:27 | gr-self-healing/15-write-data | test step completed 15-write-data
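This insert only succeeds once the operator has rebuilt the group after the full-cluster crash and the router can again reach a writable primary. Port 6446 here follows the usual MySQL Router convention for InnoDB Cluster, read-write on 6446 and read-only on 6447 (the convention is an assumption; this log only exercises 6446). A quick smoke check along the same lines, using the suite's helpers:

    # Sketch: confirm the router's RW port lands on a writable member.
    # @@hostname and @@read_only are standard MySQL system variables.
    run_mysql "SELECT @@hostname, @@read_only" \
        "-h $(get_mysql_router_service "$(get_cluster_name)") -P 6446 -uroot -proot_password"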
logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | starting test step 16-read-from-replicas logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 16-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | + source ../../functions logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | +++ get_cluster_name logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ local index=0 logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | ++ local pod= logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | +++ get_client_pod logger.go:42: 02:43:27 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ set +o xtrace logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | mysql-clienttrue logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:43:28 | gr-self-healing/16-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | + data='100500 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 16-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | configmap/16-read-from-replicas-0 created logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | +++ get_cluster_name logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ local index=1 logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | ++ local pod= logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | +++ get_client_pod logger.go:42: 02:43:30 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ set +o xtrace logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | mysql-clienttrue logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:43:31 | gr-self-healing/16-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | + data='100500 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 16-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | configmap/16-read-from-replicas-1 created logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | + for i in 0 1 2 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | +++ get_cluster_name logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ local index=2 logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | ++ local pod= logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | +++ get_client_pod logger.go:42: 02:43:33 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ local pod=mysql-client logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ set +o xtrace logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | mysql-clienttrue logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 02:43:34 | gr-self-healing/16-read-from-replicas | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | + data='100500 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | + kubectl create configmap -n kuttl-test-ethical-swan 16-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 02:43:36 | gr-self-healing/16-read-from-replicas | configmap/16-read-from-replicas-2 created logger.go:42: 02:43:37 | gr-self-healing/16-read-from-replicas | test step completed 16-read-from-replicas
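All three members now return 100500 through 100504, so the post-crash write replicated cleanly. The quorum-loss step that follows picks its victims so that two of the three group members disappear at once; mirroring the step command and the helper expansion visible in the trace, the pattern is (run_mysql and get_mysql_router_service as sketched earlier; the PRIMARY query is what get_primary_from_group_replication expands to below):

    # Resolve the current PRIMARY and one SECONDARY via the router, then
    # force-delete both pods to take out 2 of 3 members and break quorum.
    primary=$(run_mysql \
        "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';" \
        "-h $(get_mysql_router_service "$(get_cluster_name)") -P 6446 -uroot -proot_password" | cut -d'.' -f1)
    a_replica=$(run_mysql \
        "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='SECONDARY' LIMIT 1;" \
        "-h $(get_mysql_router_service "$(get_cluster_name)") -P 6446 -uroot -proot_password" | cut -d'.' -f1)
    kubectl -n "${NAMESPACE}" delete pod "${primary}" "${a_replica}" --force --grace-period=0

The cut -d'.' -f1 trims the headless-service FQDN returned by performance_schema down to the bare pod name that kubectl expects.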
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++++ pwd logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/tests/gr-self-healing logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++ test_name=gr-self-healing logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/vars.sh logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export GIT_BRANCH=PR-764 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ GIT_BRANCH=PR-764 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export VERSION=PR-764-2039b0b5 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ VERSION=PR-764-2039b0b5 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++++ which gdate
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-764/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++++ which date
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ date=/usr/bin/date
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ command -v oc
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ kubectl get nodes
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | +++ grep '^minikube'
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++ get_primary_from_group_replication
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++ cut -d. -f1
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++++ get_cluster_name
logger.go:42: 02:43:37 | gr-self-healing/17-quorum-loss | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | +++ get_mysql_router_service gr-self-healing
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | +++ local cluster=gr-self-healing
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | +++ echo gr-self-healing-router
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ local pod=
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | +++ get_client_pod
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ client_pod=mysql-client
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ wait_pod mysql-client
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ local pod=mysql-client
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ set +o xtrace
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | mysql-clienttrue
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ sed -e 's/mysql: //'
logger.go:42: 02:43:38 | gr-self-healing/17-quorum-loss | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | + primary=gr-self-healing-mysql-1
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ cut -d. -f1
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++++ get_cluster_name
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++++ kubectl -n kuttl-test-ethical-swan get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | +++ get_mysql_router_service gr-self-healing
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | +++ local cluster=gr-self-healing
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | +++ echo gr-self-healing-router
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='\''SECONDARY'\'' LIMIT 1;' '-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='\''SECONDARY'\'' LIMIT 1;'
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ local pod=
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | +++ get_client_pod
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | +++ kubectl -n kuttl-test-ethical-swan get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ client_pod=mysql-client
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ wait_pod mysql-client
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ local pod=mysql-client
logger.go:42: 02:43:41 | gr-self-healing/17-quorum-loss | ++ set +o xtrace
logger.go:42: 02:43:42 | gr-self-healing/17-quorum-loss | mysql-clienttrue
logger.go:42: 02:43:42 | gr-self-healing/17-quorum-loss | ++ kubectl -n kuttl-test-ethical-swan exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='\''SECONDARY'\'' LIMIT 1;" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 02:43:42 | gr-self-healing/17-quorum-loss | ++ sed -e 's/mysql: //'
logger.go:42: 02:43:42 | gr-self-healing/17-quorum-loss | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 02:43:43 | gr-self-healing/17-quorum-loss | + a_replica=gr-self-healing-mysql-0
logger.go:42: 02:43:43 | gr-self-healing/17-quorum-loss | + kubectl -n kuttl-test-ethical-swan delete pod gr-self-healing-mysql-1 gr-self-healing-mysql-0 --force --grace-period=0
logger.go:42: 02:43:43 | gr-self-healing/17-quorum-loss | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
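The step script echoed at "running command" above reduces to three moves: ask MySQL Router's read-write port (6446) which member is PRIMARY, pick any one SECONDARY, and force-delete both pods at once. Restated using the run_mysql sketch from earlier (the trace shows get_cluster_name resolving to gr-self-healing and get_mysql_router_service to gr-self-healing-router):

# Who is primary right now? Ask through the router's read-write port.
primary=$(run_mysql \
    "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';" \
    "-h gr-self-healing-router -P 6446 -uroot -proot_password" | cut -d'.' -f1)   # -> gr-self-healing-mysql-1

# Any one secondary will do as the second victim.
a_replica=$(run_mysql \
    "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='SECONDARY' LIMIT 1;" \
    "-h gr-self-healing-router -P 6446 -uroot -proot_password" | cut -d'.' -f1)   # -> gr-self-healing-mysql-0

# Killing 2 of 3 members at once is a quorum loss, not a mere failover: the
# operator must detect it and rebootstrap the group. In this run the step
# completes at 02:46:21, about 2.5 minutes after the deletion below.
kubectl -n "${NAMESPACE}" delete pod "${primary}" "${a_replica}" --force --grace-period=0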
logger.go:42: 02:43:44 | gr-self-healing/17-quorum-loss | pod "gr-self-healing-mysql-1" force deleted logger.go:42: 02:43:44 | gr-self-healing/17-quorum-loss | pod "gr-self-healing-mysql-0" force deleted logger.go:42: 02:46:21 | gr-self-healing/17-quorum-loss | test step completed 17-quorum-loss logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | starting test step 97-destroy-chaos-mesh logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_chaos_mesh] logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | + source ../../functions logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ realpath ../../.. logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++++ pwd logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/tests/gr-self-healing logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ test_name=gr-self-healing logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/vars.sh logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export GIT_BRANCH=PR-764 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ GIT_BRANCH=PR-764 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export VERSION=PR-764-2039b0b5 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ VERSION=PR-764-2039b0b5 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export 
IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++++ which gdate logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-764/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++++ which date logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ date=/usr/bin/date logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ command -v oc logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ kubectl get nodes logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | +++ grep '^minikube' logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | + destroy_chaos_mesh logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ tail -n1 logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '-F ' '{print 
$2}' logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | ++ sed s/NAMESPACE// logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig logger.go:42: 02:46:21 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig logger.go:42: 02:46:22 | gr-self-healing/97-destroy-chaos-mesh | + local chaos_mesh_ns=kuttl-test-ethical-swan logger.go:42: 02:46:22 | gr-self-healing/97-destroy-chaos-mesh | + '[' -n kuttl-test-ethical-swan ']' logger.go:42: 02:46:22 | gr-self-healing/97-destroy-chaos-mesh | + helm uninstall --wait --timeout 60s chaos-mesh --namespace kuttl-test-ethical-swan logger.go:42: 02:46:22 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig logger.go:42: 02:46:22 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-764/kubeconfig logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | release "chaos-mesh" uninstalled logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 02:46:32 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | ++ grep validate-auth logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl api-resources logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 02:46:33 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:34 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep 
chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:34 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:34 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:34 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:35 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:36 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces logger.go:42: 02:46:36 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:36 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:36 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:36 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:36 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces logger.go:42: 02:46:37 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:37 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:37 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:37 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:37 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + 
timeout 30 kubectl delete httpchaos --all --all-namespaces logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:38 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:39 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces logger.go:42: 02:46:39 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:39 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:39 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:39 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:39 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces logger.go:42: 02:46:40 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:40 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:40 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:40 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:40 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-ethical-swan' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + local kind=NetworkChaos logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-ethical-swan' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-pod-network-loss-primary logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-ethical-swan' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-ethical-swan logger.go:42: 02:46:41 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch NetworkChaos 
chaos-pod-network-loss-primary -n kuttl-test-ethical-swan --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | networkchaos.chaos-mesh.org/chaos-pod-network-loss-primary patched logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | networkchaos.chaos-mesh.org "chaos-pod-network-loss-primary" deleted logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:42 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:43 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces logger.go:42: 02:46:43 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:43 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:43 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:43 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:43 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-kill-label-cluster-crash kuttl-test-ethical-swan' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-kill-label-cluster-crash kuttl-test-ethical-swan' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-kill-label-cluster-crash logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-kill-label-cluster-crash kuttl-test-ethical-swan' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-ethical-swan logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch PodChaos chaos-kill-label-cluster-crash -n kuttl-test-ethical-swan --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | 
podchaos.chaos-mesh.org/chaos-kill-label-cluster-crash patched logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-ethical-swan' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-ethical-swan' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-pod-failure-primary logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-ethical-swan' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-ethical-swan logger.go:42: 02:46:44 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-failure-primary -n kuttl-test-ethical-swan --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-failure-primary patched logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-ethical-swan' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-ethical-swan' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-pod-kill-primary logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-ethical-swan' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-ethical-swan logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-kill-primary -n kuttl-test-ethical-swan --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-kill-primary patched logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:45 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces logger.go:42: 02:46:46 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-kill-label-cluster-crash" deleted logger.go:42: 02:46:46 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-failure-primary" deleted logger.go:42: 02:46:46 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-kill-primary" deleted logger.go:42: 02:46:46 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:46 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get 
podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:46 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:47 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces logger.go:42: 02:46:47 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:47 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:47 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:47 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:48 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces logger.go:42: 02:46:48 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:48 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:48 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:48 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:48 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:49 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:50 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces logger.go:42: 02:46:50 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:50 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:50 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:50 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:50 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces logger.go:42: 02:46:51 
| gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:51 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:51 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:51 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:51 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:52 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:53 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces logger.go:42: 02:46:53 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:53 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 02:46:53 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 02:46:53 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 02:46:53 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete workflows --all --all-namespaces logger.go:42: 02:46:54 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 02:46:54 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get crd logger.go:42: 02:46:54 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:46:54 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh.org logger.go:42: 02:46:55 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org 
workflows.chaos-mesh.org logger.go:42: 02:46:55 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted logger.go:42: 02:46:55 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted logger.go:42: 02:46:55 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted logger.go:42: 02:46:55 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted logger.go:42: 02:46:56 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted logger.go:42: 02:46:56 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted logger.go:42: 02:46:56 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted logger.go:42: 02:46:56 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted logger.go:42: 02:46:56 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted logger.go:42: 02:46:56 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted logger.go:42: 02:46:57 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted logger.go:42: 02:46:57 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted logger.go:42: 02:46:57 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted logger.go:42: 02:46:57 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted logger.go:42: 02:46:57 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted logger.go:42: 02:46:58 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted logger.go:42: 02:46:58 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted logger.go:42: 02:46:59 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted logger.go:42: 02:46:59 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted logger.go:42: 02:46:59 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted logger.go:42: 02:47:00 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted logger.go:42: 02:47:01 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted logger.go:42: 02:47:03 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted logger.go:42: 02:47:07 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get clusterrolebinding logger.go:42: 02:47:07 | 
gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 02:47:07 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get clusterrole logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrole logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 02:47:08 | gr-self-healing/97-destroy-chaos-mesh | test step completed 97-destroy-chaos-mesh logger.go:42: 02:47:08 | gr-self-healing/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 02:47:09 | gr-self-healing/98-drop-finalizer | PerconaServerMySQL:kuttl-test-ethical-swan/gr-self-healing updated logger.go:42: 02:47:09 | gr-self-healing/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ realpath ../../.. 
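Step 97's destroy_chaos_mesh, traced above, tears things down in a fixed order: helm uninstall, delete the chaos-mesh webhook configurations, sweep every chaos-mesh API kind, then drop the CRDs and cluster RBAC. The subtle part, visible above for NetworkChaos and the three PodChaos objects, is clearing metadata.finalizers before deleting, so `kubectl delete` cannot hang on a finalizer that the now-uninstalled controller would never process. A condensed sketch of that sweep, reconstructed from the xtrace (the trace's echo/awk field splitting is simplified here to a single read):

# For each chaos-mesh kind: drop finalizers on every instance, then delete all.
# `|| :` mirrors the trace's tolerance of empty results and timeouts.
for kind in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
    kubectl get "${kind}" --all-namespaces --no-headers \
        -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
        | while read -r k name ns; do
            kubectl patch "${k}" "${name}" -n "${ns}" --type=merge -p '{"metadata":{"finalizers":[]}}'
        done
    timeout 30 kubectl delete "${kind}" --all --all-namespaces || :
done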
logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/tests/gr-self-healing logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++ test_name=gr-self-healing logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/vars.sh logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-764 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/deploy logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-764/e2e-tests/conf logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-764 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-764 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export VERSION=PR-764-2039b0b5 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ VERSION=PR-764-2039b0b5 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-764-2039b0b5 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export 
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-764/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | ++++ which date logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ command -v oc logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 02:47:09 | gr-self-healing/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 02:47:10 | gr-self-healing/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 02:47:10 | gr-self-healing/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 02:47:10 | gr-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
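destroy_operator, running here in step 99, is the inverse of step 0's deploy_operator: force-delete the operator Deployment and, since this run uses a dedicated operator namespace (ps-operator), the namespace as well. A sketch under that assumption; OPERATOR_NS is an inferred variable name, not confirmed by this log:

destroy_operator() {
    # --force --grace-period=0 triggers the "Immediate deletion does not wait"
    # warnings seen above; the step completes once the namespace is gone a few
    # seconds later.
    kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator --force --grace-period=0
    if [[ -n ${OPERATOR_NS} ]]; then
        kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0
    fi
}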
logger.go:42: 02:47:11 | gr-self-healing/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 02:47:11 | gr-self-healing/99-remove-cluster-gracefully | + [[ -n ps-operator ]]
logger.go:42: 02:47:11 | gr-self-healing/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 02:47:11 | gr-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 02:47:11 | gr-self-healing/99-remove-cluster-gracefully | namespace "ps-operator" force deleted
logger.go:42: 02:47:17 | gr-self-healing/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
logger.go:42: 02:47:17 | gr-self-healing | gr-self-healing events from ns kuttl-test-ethical-swan:
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:27:10 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-ethical-swan/mysql-client to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:27:11 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:27:11 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:27:11 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:16 +0000 UTC Normal Pod chaos-daemon-2hz68 Scheduled Successfully assigned kuttl-test-ethical-swan/chaos-daemon-2hz68 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:16 +0000 UTC Normal Pod chaos-daemon-2phzs Scheduled Successfully assigned kuttl-test-ethical-swan/chaos-daemon-2phzs to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vz92 default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:16 +0000 UTC Normal Pod chaos-daemon-nm8j4 Scheduled Successfully assigned kuttl-test-ethical-swan/chaos-daemon-nm8j4 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:16 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-2phzs daemonset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:16 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-nm8j4 daemonset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:16 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-2hz68 daemonset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-cfqsj Scheduled Successfully assigned kuttl-test-ethical-swan/chaos-controller-manager-7799d775db-cfqsj to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-cfqsj.spec.containers{chaos-mesh} Pulled Container image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-cfqsj.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-cfqsj.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-drphc Scheduled Successfully assigned kuttl-test-ethical-swan/chaos-controller-manager-7799d775db-drphc to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-drphc.spec.containers{chaos-mesh} Pulled Container image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-drphc.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-drphc.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-pkgmc Scheduled Successfully assigned kuttl-test-ethical-swan/chaos-controller-manager-7799d775db-pkgmc to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vz92 default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-pkgmc.spec.containers{chaos-mesh} Pulled Container image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-pkgmc.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-7799d775db SuccessfulCreate Created pod: chaos-controller-manager-7799d775db-drphc replicaset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-7799d775db SuccessfulCreate Created pod: chaos-controller-manager-7799d775db-cfqsj replicaset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-7799d775db SuccessfulCreate Created pod: chaos-controller-manager-7799d775db-pkgmc replicaset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Deployment.apps chaos-controller-manager ScalingReplicaSet Scaled up replica set chaos-controller-manager-7799d775db to 3 deployment-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-daemon-2hz68.spec.containers{chaos-daemon} Pulled Container image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-daemon-2hz68.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-daemon-2hz68.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-daemon-2phzs.spec.containers{chaos-daemon} Pulled Container image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-daemon-2phzs.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:17 +0000 UTC Normal Pod chaos-daemon-2phzs.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:18 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-pkgmc.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:18 +0000 UTC Normal Pod chaos-daemon-nm8j4.spec.containers{chaos-daemon} Pulled Container image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" already present on machine kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:18 +0000 UTC Normal Pod chaos-daemon-nm8j4.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:18 +0000 UTC Normal Pod chaos-daemon-nm8j4.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:18 +0000 UTC Normal ConfigMap chaos-mesh LeaderElection chaos-controller-manager-7799d775db-drphc_2342e3e1-bf94-4a82-ac34-b95153090417 became leader
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:18 +0000 UTC Normal Lease.coordination.k8s.io chaos-mesh LeaderElection chaos-controller-manager-7799d775db-drphc_2342e3e1-bf94-4a82-ac34-b95153090417 became leader
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:31 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:31 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:31 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-ethical-swan/datadir-gr-self-healing-mysql-0" pd.csi.storage.gke.io_gke-be530888b01b456ebe1d-f90d-5158-vm_67f015a9-9514-4dff-9594-88d07693e79c
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:31 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Claim datadir-gr-self-healing-mysql-0 Pod gr-self-healing-mysql-0 in StatefulSet gr-self-healing-mysql success statefulset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:31 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Pod gr-self-healing-mysql-0 in StatefulSet gr-self-healing-mysql successful statefulset-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:34 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-c5102b54-aa8e-45b6-b15e-dc1a98751a30 pd.csi.storage.gke.io_gke-be530888b01b456ebe1d-f90d-5158-vm_67f015a9-9514-4dff-9594-88d07693e79c
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:35 +0000 UTC Normal Pod gr-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-0 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:42 +0000 UTC Normal Pod gr-self-healing-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c5102b54-aa8e-45b6-b15e-dc1a98751a30" attachdetach-controller
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:43 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:43 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 120ms (120ms including waiting) kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:43 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:43 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 99ms (99ms including waiting) kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:45 +0000 UTC Normal Pod
gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:46 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 152ms (152ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:46 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:28:46 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:18 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:18 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:18 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-ethical-swan/datadir-gr-self-healing-mysql-1" pd.csi.storage.gke.io_gke-be530888b01b456ebe1d-f90d-5158-vm_67f015a9-9514-4dff-9594-88d07693e79c logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:18 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Claim datadir-gr-self-healing-mysql-1 Pod gr-self-healing-mysql-1 in StatefulSet gr-self-healing-mysql success statefulset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:18 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Pod gr-self-healing-mysql-1 in StatefulSet gr-self-healing-mysql successful statefulset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:22 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-72c23bf5-5629-4b4d-b275-4b4ffe7024af pd.csi.storage.gke.io_gke-be530888b01b456ebe1d-f90d-5158-vm_67f015a9-9514-4dff-9594-88d07693e79c logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:22 +0000 UTC Normal Pod gr-self-healing-mysql-1 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-1 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:30 +0000 UTC Normal Pod gr-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-72c23bf5-5629-4b4d-b275-4b4ffe7024af" attachdetach-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:31 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:31 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 107ms (107ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:31 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:31 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 99ms (99ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 106ms (106ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:29:33 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:04 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:29:51 Bootstrap starting... 2024/10/30 02:29:51 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4. 
NOTE: Some configuration options need to be fixed:
+----------------------------------------+---------------+----------------+----------------------------+
| Variable                               | Current Value | Required Value | Note                       |
+----------------------------------------+---------------+----------------+----------------------------+
| binlog_transaction_dependency_tracking | COMMIT_ORDER  | WRITESET       | Update the server variable |
+----------------------------------------+---------------+----------------+----------------------------+
Disabled super_read_only on the instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' Enabling super_read_only on the instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' was configured to be used in an InnoDB cluster. 2024/10/30 02:29:51 Instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:29:51 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:29:51 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:29:52 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:29:52 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] 2024/10/30 02:29:52 Adding instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:29:52 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The target instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to decide whether incremental state recovery can correctly provision it. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306... This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member.
Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is being cloned from gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed** Stage DROP DATA: Completed NOTE: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:04 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 122ms (122ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:36 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:36 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-ethical-swan/datadir-gr-self-healing-mysql-2" pd.csi.storage.gke.io_gke-be530888b01b456ebe1d-f90d-5158-vm_67f015a9-9514-4dff-9594-88d07693e79c logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:36 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:36 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Claim datadir-gr-self-healing-mysql-2 Pod gr-self-healing-mysql-2 in StatefulSet gr-self-healing-mysql success statefulset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:36 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Pod gr-self-healing-mysql-2 in StatefulSet gr-self-healing-mysql successful statefulset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:40 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-4b203ff8-4d81-4453-b59c-938d33560067 pd.csi.storage.gke.io_gke-be530888b01b456ebe1d-f90d-5158-vm_67f015a9-9514-4dff-9594-88d07693e79c logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:41 +0000 UTC Normal Pod gr-self-healing-mysql-2 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-2 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vz92 default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:49 +0000 UTC Normal Pod gr-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-4b203ff8-4d81-4453-b59c-938d33560067" attachdetach-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:52 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:52 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 335ms (335ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:52 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:52 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:54 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:54 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 322ms (322ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:54 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:54 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:54 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:55 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 313ms (313ms including waiting) 
kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:55 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:30:55 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:26 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:31:12 Bootstrap starting... 2024/10/30 02:31:12 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4.
NOTE: Some configuration options need to be fixed:
+----------------------------------------+---------------+----------------+----------------------------+
| Variable                               | Current Value | Required Value | Note                       |
+----------------------------------------+---------------+----------------+----------------------------+
| binlog_transaction_dependency_tracking | COMMIT_ORDER  | WRITESET       | Update the server variable |
+----------------------------------------+---------------+----------------+----------------------------+
Disabled super_read_only on the instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' Enabling super_read_only on the instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' was configured to be used in an InnoDB cluster. 2024/10/30 02:31:12 Instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:31:12 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:31:12 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:31:13 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:31:14 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures.
SSL: REQUIRED Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] 2024/10/30 02:31:14 Adding instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:31:14 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The target instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to decide whether incremental state recovery can correctly provision it. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is being cloned from gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... 
- kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:26 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:26 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:26 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 316ms (316ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-dgdlj to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vz92 default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-jl654 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-v2vs4 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-84cf595657 SuccessfulCreate Created pod: gr-self-healing-router-84cf595657-v2vs4 replicaset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-84cf595657 SuccessfulCreate Created pod: gr-self-healing-router-84cf595657-dgdlj replicaset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-84cf595657 SuccessfulCreate Created pod: gr-self-healing-router-84cf595657-jl654 replicaset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:31:59 +0000 UTC Normal Deployment.apps gr-self-healing-router ScalingReplicaSet Scaled up replica set gr-self-healing-router-84cf595657 to 3 deployment-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 297ms (297ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.initContainers{router-init} Created Created container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Normal Pod 
gr-self-healing-router-84cf595657-dgdlj.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 337ms (337ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.initContainers{router-init} Created Created container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:00 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-v2vs4 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 318ms (318ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.containers{router} Created Created container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.containers{router} Started Started container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 346ms (346ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.initContainers{router-init} Created Created container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:01 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:02 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:02 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 309ms (309ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:02 +0000 UTC Normal Pod 
gr-self-healing-router-84cf595657-jl654.spec.containers{router} Created Created container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:02 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.containers{router} Started Started container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:03 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:03 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 296ms (296ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:03 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.containers{router} Created Created container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:03 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.containers{router} Started Started container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:31 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary FinalizerInited Finalizer has been inited logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:31 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Updated Successfully update finalizer of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:31 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Updated Successfully update desiredPhase of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:31 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-0 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:31 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:31 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:32 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:32 +0000 UTC Normal Pod gr-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-0 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:32 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:32 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 321ms (321ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:32 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started 
Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:34 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:34 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 319ms (319ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:34 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:34 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:34 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:35 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 316ms (316ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:35 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:32:35 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:33:14 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:32:52 Bootstrap starting... 2024/10/30 02:32:52 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 2024/10/30 02:32:53 Instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:32:53 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:32:53 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 
Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:32:53 Failed get cluster from peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan, stdout: stderr: WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:32:53 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:32:53 Connected to peer gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:32:54 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] 2024/10/30 02:32:54 Adding instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:32:54 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306... This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 ** Stage DROP DATA: \ ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is shutting down... * Waiting for server restart... 
\ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:33:14 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:33:14 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:33:14 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 326ms (326ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary FinalizerInited Finalizer has been inited logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update finalizer of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Started Experiment has started logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update desiredPhase of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-1 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql definition changed, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:21 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 9b06d4acb232fa9dae0605b33901511aa0eb347823391f10b16f66ca9092eaf3 not found: not found kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:21 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Liveness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 9b06d4acb232fa9dae0605b33901511aa0eb347823391f10b16f66ca9092eaf3 not found: not found kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:21 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Killing Container xtrabackup definition changed, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:21 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "gcr.io/google-containers/pause:latest" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:34:21 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "gcr.io/google-containers/pause:latest" in 80ms (80ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:35:13 +0000 UTC Normal PodChaos.chaos-mesh.org 
chaos-pod-failure-primary TimeUp Time up according to the duration logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:35:13 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update desiredPhase of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:35:13 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Recovered Successfully recover chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-1 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:35:13 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary FinalizerInited Finalizer has been inited logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update finalizer of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Started Experiment has started logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update desiredPhase of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-0 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:36:50 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org gr-self-healing-mysql-0 Updated Successfully update ObservedGeneration and FailedMessage of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:01 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-v2vs4.spec.containers{router} Unhealthy Readiness probe failed: Read-write route is not healthy kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:02 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Liveness probe failed: 2024/10/30 02:37:02 in primary partition: false 2024/10/30 02:37:02 liveness check failed: possible split brain! kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:04 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-jl654.spec.containers{router} Unhealthy Readiness probe failed: Read-write route is not healthy kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:12 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Liveness probe failed: 2024/10/30 02:37:12 in primary partition: false 2024/10/30 02:37:12 liveness check failed: possible split brain! kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:22 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Liveness probe failed: 2024/10/30 02:37:22 in primary partition: false 2024/10/30 02:37:22 liveness check failed: possible split brain! 
kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:22 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Container mysql failed liveness probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:36 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: command "/opt/percona/healthcheck readiness" timed out kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary TimeUp Time up according to the duration logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update desiredPhase of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Recovered Successfully recover chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-0 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:50 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:50 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org gr-self-healing-mysql-0 Updated Successfully update ObservedGeneration and FailedMessage of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:37:53 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash FinalizerInited Finalizer has been inited logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Updated Successfully update finalizer of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Updated Successfully update desiredPhase of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-1 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-2 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-v2vs4 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-dgdlj logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-mysql-0 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 
UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-jl654 logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dgdlj.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-dgdlj.spec.containers{router} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 69a9a72cf6192203c1007084857ddc72723a5d95c7c8e51a205164915728aaf6 not found: not found kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-jl654.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-jl654.spec.containers{router} Unhealthy Readiness probe failed: {"isAlive":true} kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-q5mzg to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-v2vs4.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:34 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-84cf595657 SuccessfulCreate Created pod: gr-self-healing-router-84cf595657-q5mzg replicaset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Updated Successfully update records of resource logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-0 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Warning Pod gr-self-healing-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-c5102b54-aa8e-45b6-b15e-dc1a98751a30" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-mysql-1 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-1 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Warning Pod gr-self-healing-mysql-1 FailedAttachVolume Multi-Attach error for volume "pvc-72c23bf5-5629-4b4d-b275-4b4ffe7024af" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 02:47:17 | 
gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-dw4wp to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 296ms (296ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.initContainers{router-init} Created Created container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-router-84cf595657-tzwq5 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vz92 default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 329ms (329ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-84cf595657 SuccessfulCreate Created pod: gr-self-healing-router-84cf595657-tzwq5 replicaset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:35 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-84cf595657 SuccessfulCreate Created pod: gr-self-healing-router-84cf595657-dw4wp replicaset-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:36 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 321ms (321ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:36 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.initContainers{router-init} Created Created container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:36 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:36 +0000 UTC Normal Pod 
gr-self-healing-router-84cf595657-tzwq5.spec.initContainers{router-init} Created Created container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:36 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 301ms (301ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Created Created container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Started Started container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 300ms (300ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Created Created container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Started Started container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:37 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 294ms (294ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:38 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 258ms (258ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:38 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Created Created container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:38 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Started Started container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:39 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 290ms (290ms including waiting) kubelet 
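The chaos-mesh events above come from two experiments in this test. chaos-pod-network-loss-primary is a NetworkChaos that drops traffic to the primary pod (gr-self-healing-mysql-0 here) and is recovered automatically when its duration elapses, which is the TimeUp / Recovered pair at 02:37:50. chaos-kill-label-cluster-crash is a PodChaos that kills every pod matching a label at once (all three mysql pods and all three routers at 02:38:34) to simulate a full cluster crash. A minimal sketch of manifests that would produce these events; the loss percentage, duration, and label selector are assumptions, since the log shows only the resulting events, not the test's YAML:

    kubectl -n kuttl-test-ethical-swan apply -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: NetworkChaos
    metadata:
      name: chaos-pod-network-loss-primary
    spec:
      action: loss                      # drop packets to/from the target pod
      mode: one
      selector:
        pods:
          kuttl-test-ethical-swan:
            - gr-self-healing-mysql-0   # the primary, per the Recovered event above
      loss:
        loss: "100"                     # assumed: full loss for the chaos window
      duration: "60s"                   # assumed: the log only shows TimeUp firing
    ---
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-kill-label-cluster-crash
    spec:
      action: pod-kill
      mode: all                         # kill every matching pod simultaneously
      selector:
        namespaces:
          - kuttl-test-ethical-swan
        labelSelectors:
          app.kubernetes.io/instance: gr-self-healing   # assumed label; the log lists only the affected pods
    EOF

When the NetworkChaos duration expires, chaos-mesh updates desiredPhase and the records of the resource and emits Recovered, matching the 02:37:50 sequence; the PodChaos is what forces the whole-cluster re-bootstrap that the rest of the log works through.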
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:43 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 335ms (335ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:44 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} BackOff Back-off restarting failed container router in pod gr-self-healing-router-84cf595657-dw4wp_kuttl-test-ethical-swan(a141d6c4-91b0-49d6-b64e-91dafc998df7) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:44 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} BackOff Back-off restarting failed container router in pod gr-self-healing-router-84cf595657-tzwq5_kuttl-test-ethical-swan(25763c22-29c8-4540-b87a-0d9e0e46425e) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:49 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} BackOff Back-off restarting failed container router in pod gr-self-healing-router-84cf595657-q5mzg_kuttl-test-ethical-swan(3a55f9e1-61c1-4af8-b263-18f2e2eb9bc7) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:38:56 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 295ms (295ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:00 +0000 UTC Normal Pod gr-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-72c23bf5-5629-4b4d-b275-4b4ffe7024af" attachdetach-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:00 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 306ms (306ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:02 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:02 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 293ms (293ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:02 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:02 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 278ms (278ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Created Created container mysql kubelet 
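The FailedAttachVolume warnings at 02:38:35 are the expected Multi-Attach window after a hard pod kill on GKE: each PVC is backed by a disk that can be attached to only one node at a time, so the attach/detach controller must detach it from the old node before the rescheduled pod can mount it. mysql-1's volume re-attaches at 02:39:00 above, and mysql-0's follows at 02:39:24 below, roughly half a minute later. If such a window ever looks stuck, the attachment state can be inspected directly; a sketch:

    # VolumeAttachment objects record which node each PV is currently attached to
    kubectl get volumeattachments.storage.k8s.io \
      -o custom-columns=NAME:.metadata.name,PV:.spec.source.persistentVolumeName,NODE:.spec.nodeName,ATTACHED:.status.attached

    # Namespace events, newest last, to watch the detach/attach sequence play out
    kubectl -n kuttl-test-ethical-swan get events --sort-by=.lastTimestamp | grep -i attach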
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:05 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 290ms (290ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:05 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:05 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:05 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 310ms (310ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:24 +0000 UTC Normal Pod gr-self-healing-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c5102b54-aa8e-45b6-b15e-dc1a98751a30" attachdetach-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:26 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:26 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 311ms (312ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:26 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:26 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:27 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 324ms (324ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod 
gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 308ms (308ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:30 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 324ms (324ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:31 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 328ms (328ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:39:35 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 302ms (302ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:40:41 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:39:45 Bootstrap starting... 2024/10/30 02:39:45 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 2024/10/30 02:39:46 Instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:39:46 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:39:46 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance belongs to that metadata, but GR is not active) (MYSQLSH 51314) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:39:46 Failed get cluster from peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan, stdout: stderr: WARNING: Using a password on the command line interface can be insecure. 
Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance belongs to that metadata, but GR is not active) (MYSQLSH 51314) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:39:46 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:39:47 Connected to peer gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:39:48 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] 2024/10/30 02:39:48 Adding instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:39:48 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is running auto-rejoin process, which will be cancelled. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306... This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 ** Stage DROP DATA: \ ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% In Progress** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... 
- kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:40:41 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:40:41 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:40:41 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 312ms (312ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:16 +0000 UTC Normal Pod gr-self-healing-mysql-2 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-2 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vz92 default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:23 +0000 UTC Normal Pod gr-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-4b203ff8-4d81-4453-b59c-938d33560067" attachdetach-controller logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:25 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:26 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 329ms (329ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:26 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:26 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:28 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:28 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 317ms (317ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:28 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:28 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:28 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:29 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 292ms (292ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:41:29 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | 
gr-self-healing | 2024-10-30 02:41:29 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:42:42 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:41:45 Bootstrap starting... 2024/10/30 02:41:45 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 2024/10/30 02:41:46 Instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:41:46 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:41:46 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:41:46 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:41:47 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] 2024/10/30 02:41:47 Adding instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:41:47 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is running auto-rejoin process, which will be cancelled. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Instance configuration is suitable. 
NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 ** Stage DROP DATA: \ ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:42:42 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:42:42 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:42:42 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 396ms (396ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Normal Pod gr-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-0 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-vh1s default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Normal Pod gr-self-healing-mysql-1 Scheduled Successfully assigned kuttl-test-ethical-swan/gr-self-healing-mysql-1 to gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs default-scheduler logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:44 +0000 UTC Warning Pod gr-self-healing-mysql-1 FailedMount Unable to attach or mount volumes: unmounted volumes=[datadir], unattached volumes=[], failed to process volumes=[datadir]: error processing PVC 
kuttl-test-ethical-swan/datadir-gr-self-healing-mysql-1: failed to fetch PVC from API server: persistentvolumeclaims "datadir-gr-self-healing-mysql-1" is forbidden: User "system:node:gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs" cannot get resource "persistentvolumeclaims" in API group "" in the namespace "kuttl-test-ethical-swan": no relationship found between node 'gke-jen-ps-764-2039b0b5--default-pool-e4ffff32-01xs' and this object kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 330ms (330ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:45 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:45 +0000 UTC Warning Pod gr-self-healing-router-84cf595657-q5mzg.spec.containers{router} Unhealthy Readiness probe failed: Read-write route is not healthy kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:47 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:47 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 286ms (286ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:47 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:48 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:48 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:48 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 301ms (301ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:48 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:43:48 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:06 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:06 +0000 UTC Normal Pod 
gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-764-2039b0b5" in 316ms (316ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:06 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:06 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:08 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:08 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 273ms (273ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:08 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:08 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:08 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:09 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 260ms (260ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:09 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:09 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:29 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:44:26 Bootstrap starting... 2024/10/30 02:44:26 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 
2024/10/30 02:44:26 Instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:44:26 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:44:26 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance belongs to that metadata, but GR is not active) (MYSQLSH 51314) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:27 Failed get cluster from peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan, stdout: stderr: WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance belongs to that metadata, but GR is not active) (MYSQLSH 51314) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:27 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:27 Failed get cluster from peer gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan, stdout: stderr: WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:27 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:44:28 Connected to peer gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:44:29 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE_PARTIAL StatusText: Cluster is NOT tolerant to any failures. 1 member is not active. SSL: REQUIRED Primary: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: (MISSING) Errors: [NOTE: group_replication is stopped.] Member 1 Address: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [] 2024/10/30 02:44:29 Adding instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:44:29 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. ERROR: The operation cannot be executed because it failed to acquire the Cluster lock through primary member 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. 
Another operation requiring access to the member is still in progress, please wait for it to finish and try again. Cluster.addInstance: Failed to acquire Cluster lock through primary member 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' (MYSQLSH 51500) at (command line):1:33 in dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) ^ 2024/10/30 02:44:29 bootstrap finished in 3.493086 seconds 2024/10/30 02:44:29 bootstrap failed: add instance: exit status 1 kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:29 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:34 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 274ms (274ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:44:59 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2024/10/30 02:44:56 Bootstrap starting... 2024/10/30 02:44:56 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'clearReadOnly': true}) WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 2024/10/30 02:44:56 Instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan) configured to join to the InnoDB cluster 2024/10/30 02:44:56 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan] 2024/10/30 02:44:56 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance belongs to that metadata, but GR is not active) (MYSQLSH 51314) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:57 Failed get cluster from peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan, stdout: stderr: WARNING: Using a password on the command line interface can be insecure. 
Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance belongs to that metadata, but GR is not active) (MYSQLSH 51314) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:57 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:57 Failed get cluster from peer gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan, stdout: stderr: WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2024/10/30 02:44:57 Running dba.getCluster('grselfhealing') WARNING: Using a password on the command line interface can be insecure. 2024/10/30 02:44:58 Connected to peer gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan 2024/10/30 02:44:59 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE_PARTIAL StatusText: Cluster is NOT tolerant to any failures. 1 member is not active. SSL: REQUIRED Primary: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: (MISSING) Errors: [NOTE: group_replication is stopped.] Member 1 Address: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306 State: ONLINE Errors: [WARNING: The replication recovery account in use by the instance is not stored in the metadata. Use Cluster.rescan() to update the metadata.] 2024/10/30 02:44:59 Adding instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan) to InnoDB cluster 2024/10/30 02:44:59 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. ERROR: The operation cannot be executed because it failed to acquire the Cluster lock through primary member 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306'. Another operation requiring access to the member is still in progress, please wait for it to finish and try again. 
Cluster.addInstance: Failed to acquire Cluster lock through primary member 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-ethical-swan:3306' (MYSQLSH 51500) at (command line):1:33 in dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan', {'recoveryMethod': 'clone', 'waitRecovery': 3}) ^ 2024/10/30 02:44:59 bootstrap finished in 3.444854 seconds 2024/10/30 02:44:59 bootstrap failed: add instance: exit status 1 kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:45:04 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 299ms (299ms including waiting) kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:26 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-cfqsj.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:26 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-drphc.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:26 +0000 UTC Normal Pod chaos-controller-manager-7799d775db-pkgmc.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:26 +0000 UTC Normal ConfigMap chaos-mesh LeaderElection chaos-controller-manager-7799d775db-drphc_2342e3e1-bf94-4a82-ac34-b95153090417 stopped leading logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:27 +0000 UTC Normal Pod chaos-daemon-2hz68.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:27 +0000 UTC Normal Pod chaos-daemon-2phzs.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:27 +0000 UTC Normal Pod chaos-daemon-nm8j4.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:46:27 +0000 UTC Normal Lease.coordination.k8s.io chaos-mesh LeaderElection chaos-controller-manager-7799d775db-drphc_2342e3e1-bf94-4a82-ac34-b95153090417 stopped leading logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:10 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:10 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:10 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:10 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:10 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-dw4wp.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:10 +0000 UTC Normal Pod gr-self-healing-router-84cf595657-tzwq5.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:15 +0000 UTC Warning Pod 
gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/10/30 02:47:15 readiness check failed: connect to db: ping DB: dial tcp 10.25.122.46:33062: connect: connection refused kubelet
logger.go:42: 02:47:17 | gr-self-healing | 2024-10-30 02:47:15 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/10/30 02:47:15 readiness check failed: connect to db: ping DB: dial tcp 10.25.120.29:33062: connect: connection refused kubelet
logger.go:42: 02:47:17 | gr-self-healing | Deleting namespace: kuttl-test-ethical-swan
=== NAME  kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (1262.29s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/gr-self-healing (1261.85s)
PASS
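The startup-probe failures in this run are the bootstrap routine thinking out loud rather than a defect: each mysql container's bootstrap drives MySQL Shell's AdminAPI, and the probe output echoes the exact calls (dba.configureLocalInstance, dba.getCluster('grselfhealing'), Cluster.addInstance with clone recovery). A rough sketch of the equivalent manual sequence, using the hostnames from the log and assuming an operator account whose password is in OPERATOR_PASS; this illustrates the AdminAPI flow, not the operator's actual code path:

    # OPERATOR_PASS is a placeholder for the operator user's password.
    # Prepare the rebuilt instance for Group Replication. The bootstrap log uses the
    # deprecated dba.configureLocalInstance; dba.configureInstance is the current call.
    mysqlsh --no-wizard \
      --uri "operator:${OPERATOR_PASS}@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan" \
      -e "dba.configureInstance()"

    # From a peer that still holds the cluster metadata, add the member back.
    # recoveryMethod 'clone' wipes the joiner, copies data from a donor, then restarts
    # mysqld, which is why the probe later prints "Waiting for server restart...".
    mysqlsh --no-wizard \
      --uri "operator:${OPERATOR_PASS}@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-ethical-swan" \
      -e "dba.getCluster('grselfhealing').addInstance(
            'operator:${OPERATOR_PASS}@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-ethical-swan',
            {recoveryMethod: 'clone'})"

The "Failed to acquire Cluster lock" errors at 02:44:29 and 02:44:59 show why the probe is retried rather than treated as fatal: addInstance serializes on a lock held through the primary, mysql-1 loses the race while another member's clone is still in flight, the kubelet restarts the container, and a later bootstrap attempt can proceed once the lock is free.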
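The final readiness failures at 02:47:15 (connection refused on port 33062) are teardown noise: the operator is stopping mysqld while the kubelet is still probing. As the 02:37:36 timeout earlier in the log shows, the probes shell out to a healthcheck binary baked into the image, so the same check the kubelet runs can be reproduced by hand while a cluster is live; the binary path below is taken from that probe error:

    # Run the kubelet's readiness check manually inside the mysql container
    kubectl -n kuttl-test-ethical-swan exec gr-self-healing-mysql-0 -c mysql -- \
      /opt/percona/healthcheck readiness && echo ready || echo not-ready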