=== RUN   kuttl
    harness.go:464: starting setup
    harness.go:255: running tests using configured kubeconfig.
    harness.go:278: Successful connection to cluster at: https://34.171.81.190
    harness.go:363: running tests
    harness.go:75: going to run test suite with timeout of 180 seconds for each step
    harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/gr-self-healing
=== PAUSE kuttl/harness/gr-self-healing
=== CONT  kuttl/harness/gr-self-healing
logger.go:42: 09:00:42 | gr-self-healing | Creating namespace: kuttl-test-creative-mastodon
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | + source ../../functions
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ realpath ../../..
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++++ pwd
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++ test_name=gr-self-healing
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export GIT_BRANCH=PR-869
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ GIT_BRANCH=PR-869
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export VERSION=PR-869-ff26afb0
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ VERSION=PR-869-ff26afb0
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.0
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.0
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ export CERT_MANAGER_VER=1.16.3
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ CERT_MANAGER_VER=1.16.3
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++++ which gdate
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | ++++ which date
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ oc get projects
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ :
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 09:00:42 | gr-self-healing/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + init_temp_dir
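Note: every TestStep begins by sourcing e2e-tests/vars.sh, so the whole image matrix above is driven by environment variables. A minimal sketch of reproducing just this test locally with overridden images, assuming the suite is driven directly by the kubectl-kuttl plugin as the harness output suggests (the repo may have its own wrapper script instead):

    # hypothetical local invocation; the IMAGE* variables are the ones dumped above
    export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0
    export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    kubectl kuttl test e2e-tests/tests --test gr-self-healing --timeout 180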
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + rm -rf /tmp/kuttl/ps/gr-self-healing
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/gr-self-healing
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + deploy_operator
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + destroy_operator
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + true
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + true
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 09:00:43 | gr-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 09:00:44 | gr-self-healing/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 09:00:44 | gr-self-healing/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 09:00:45 | gr-self-healing/0-deploy-operator | namespace/ps-operator created
logger.go:42: 09:00:45 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy/crd.yaml
logger.go:42: 09:00:46 | gr-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 09:00:46 | gr-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 09:00:47 | gr-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 09:00:47 | gr-self-healing/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 09:00:47 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy/cw-rbac.yaml
logger.go:42: 09:00:48 | gr-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 09:00:48 | gr-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 09:00:48 | gr-self-healing/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-869-ff26afb0
logger.go:42: 09:00:49 | gr-self-healing/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-869-ff26afb0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy/cw-operator.yaml
logger.go:42: 09:00:50 | gr-self-healing/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 09:00:50 | gr-self-healing/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 09:00:50 | gr-self-healing/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 09:00:50 | gr-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-mastodon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/secrets.yaml
logger.go:42: 09:00:51 | gr-self-healing/0-deploy-operator | secret/test-secrets created
logger.go:42: 09:00:51 | gr-self-healing/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 09:00:51 | gr-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-mastodon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 09:00:52 | gr-self-healing/0-deploy-operator | secret/test-ssl created
logger.go:42: 09:00:52 | gr-self-healing/0-deploy-operator | + deploy_client
logger.go:42: 09:00:52 | gr-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-mastodon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/client.yaml
logger.go:42: 09:00:53 | gr-self-healing/0-deploy-operator | pod/mysql-client created
logger.go:42: 09:00:53 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:00:53 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:00:54 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 09:00:55 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:00:55 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:00:55 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 09:00:57 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:00:57 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:00:57 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 09:00:58 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:00:58 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:00:59 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 09:01:00 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:01:00 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:01:00 | gr-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | percona-server-mysql-operator  ps-operator  1
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | ASSERT PASS
logger.go:42: 09:01:02 | gr-self-healing/0-deploy-operator | test step completed 0-deploy-operator
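The retry loop above polls with the kubectl-assert plugin until the operator Deployment reports one ready replica; kuttl reruns the command roughly every second or two until it passes or the step times out. A roughly equivalent readiness gate using only stock kubectl would be (sketch, not what the suite actually runs):

    kubectl -n "${OPERATOR_NS:-$NAMESPACE}" wait deployment/percona-server-mysql-operator \
        --for=condition=Available --timeout=120s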
logger.go:42: 09:01:02 | gr-self-healing/1-deploy-chaos-mesh | starting test step 1-deploy-chaos-mesh
logger.go:42: 09:01:02 | gr-self-healing/1-deploy-chaos-mesh | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_chaos_mesh]
logger.go:42: 09:01:02 | gr-self-healing/1-deploy-chaos-mesh | + source ../../functions
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | + deploy_chaos_mesh
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | + destroy_chaos_mesh
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ tail -n1
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '-F ' '{print $2}'
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ sed s/NAMESPACE//
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | + local chaos_mesh_ns=
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 09:01:03 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | ++ grep validate-auth
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl api-resources
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 09:01:04 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:05 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get crd
logger.go:42: 09:01:05 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh.org
logger.go:42: 09:01:05 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete crd
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrolebinding
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrole
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 09:01:06 | gr-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrole
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | + helm repo add chaos-mesh https://charts.chaos-mesh.org
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | "chaos-mesh" has been added to your repositories
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=kuttl-test-creative-mastodon --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig
logger.go:42: 09:01:07 | gr-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | NAME: chaos-mesh
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | LAST DEPLOYED: Wed Mar 19 09:01:19 2025
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | NAMESPACE: kuttl-test-creative-mastodon
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | STATUS: deployed
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | REVISION: 1
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | TEST SUITE: None
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | NOTES:
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | 1. Make sure chaos-mesh components are running
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh |    kubectl get pods --namespace kuttl-test-creative-mastodon -l app.kubernetes.io/instance=chaos-mesh
logger.go:42: 09:01:33 | gr-self-healing/1-deploy-chaos-mesh | + sleep 10
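Each "error: resource(s) were provided, but no name was specified" above comes from destroy_chaos_mesh piping an empty grep/awk result into kubectl delete on a cluster with no prior Chaos Mesh install; the trailing "+ :" lines show the script deliberately swallows those failures. A quieter variant of the same idempotent cleanup would guard the pipeline first (sketch; "names" is a hypothetical local variable, not one from the suite's functions file):

    names=$(kubectl get validatingwebhookconfiguration 2>/dev/null | grep chaos-mesh | awk '{print $1}') || true
    if [ -n "$names" ]; then
        timeout 30 kubectl delete validatingwebhookconfiguration $names
    fi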
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.
Detected at:
> goroutine 13 [running]:
> runtime/debug.Stack()
> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002f3c00, {0x184a055, 0x14})
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002f3c00}, 0x0}, {0x184a055?, 0xc0005c5f80?})
> 	/home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc000462a10, {0x1accd90, 0xc00043c180}, 0x0, {0x0, 0x0}, 0x0})
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc000181d48?, {0x0, 0xc000462a10, {0x1accd90, 0xc00043c180}, 0x0, {0x0, 0x0}, 0x0})
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc000181d48, {0x0, 0xc000462a10, {0x1accd90, 0xc00043c180}, 0x0, {0x0, 0x0}, 0x0})
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc000379208, 0x47?)
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc000720ea0, 0xc0003861a0, {0xc00004af40, 0x1c})
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc000720ea0, 0xc0003861a0, {0xc00004af40, 0x1c})
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc000282fa0, 0xc0003861a0, 0xc0003af950)
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc0003861a0)
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc0003861a0, 0xc0003820f0)
> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 24
> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 09:01:46 | gr-self-healing/1-deploy-chaos-mesh | test step completed 1-deploy-chaos-mesh
logger.go:42: 09:01:46 | gr-self-healing/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 09:01:46 | gr-self-healing/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="group-replication"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
  | yq eval '.spec.proxy.haproxy.enabled=false' - \
  | yq eval '.spec.proxy.router.enabled=true' - \
  | yq eval '.spec.proxy.router.size=3' - \
  | yq eval '.spec.proxy.router.affinity.antiAffinityTopologyKey="none"' - \
  | yq eval '.spec.orchestrator.enabled=false' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 09:01:46 | gr-self-healing/2-create-cluster | + source ../../functions
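The goroutine dump above is benign kuttl noise: the harness builds a controller-runtime client without first calling log.SetLogger, so controller-runtime prints this one-time warning; it is not a test failure. Step 2 then composes the cluster from deploy/cr.yaml via get_cr and layers the patches shown in the command: group replication with three MySQL pods, HAProxy off, MySQL Router on with three replicas, and the orchestrator off (it manages async replication, not group replication). A quick way to confirm the applied shape after the step (sketch; NAMESPACE as used by the step itself):

    kubectl -n "$NAMESPACE" get ps gr-self-healing \
        -o jsonpath='{.spec.mysql.clusterType} {.spec.mysql.size} {.spec.proxy.router.size}{"\n"}'
    # expect: group-replication 3 3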
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + get_cr
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + local name_suffix=
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="group-replication"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=false -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=false -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval .spec.proxy.router.enabled=true -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval .spec.proxy.router.size=3 -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.metadata.name="%s"' gr-self-healing
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + kubectl -n kuttl-test-creative-mastodon apply -f -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.metadata.name="gr-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy/cr.yaml
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-869-ff26afb0
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + '[' -n '' ']'
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-869-ff26afb0"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 09:01:47 | gr-self-healing/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 09:01:48 | gr-self-healing/2-create-cluster | perconaservermysql.ps.percona.com/gr-self-healing created
logger.go:42: 09:05:44 | gr-self-healing/2-create-cluster | test step completed 2-create-cluster
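Steps 3 and 4 route all SQL through the MySQL Router service on port 6446, Router's conventional read-write (primary) port for the classic protocol; 6447 would be the read-only port. The suite's run_mysql helper boils down to an exec against the long-lived mysql-client pod, roughly:

    # equivalent one-off query through the router's read-write port (sketch)
    kubectl -n "$NAMESPACE" exec mysql-client -- bash -c \
        'printf "%s\n" "SELECT @@hostname" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password'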
logger.go:42: 09:05:44 | gr-self-healing/3-write-data | starting test step 3-write-data
logger.go:42: 09:05:44 | gr-self-healing/3-write-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
run_mysql \
  "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
  "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password"
run_mysql \
  "INSERT myDB.myTable (id) VALUES (100500)" \
  "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password"
sleep 5]
logger.go:42: 09:05:44 | gr-self-healing/3-write-data | + source ../../functions
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | +++ get_cluster_name
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | ++ get_mysql_router_service gr-self-healing
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | ++ local cluster=gr-self-healing
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | ++ echo gr-self-healing-router
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | + local pod=
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | ++ get_client_pod
logger.go:42: 09:05:45 | gr-self-healing/3-write-data | ++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + client_pod=mysql-client
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + wait_pod mysql-client
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + local pod=mysql-client
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + set +o xtrace
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | mysql-clienttrue
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 09:05:46 | gr-self-healing/3-write-data | + kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + :
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | +++ get_cluster_name
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | ++ get_mysql_router_service gr-self-healing
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | ++ local cluster=gr-self-healing
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | ++ echo gr-self-healing-router
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + local pod=
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | ++ get_client_pod
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | ++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + client_pod=mysql-client
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + wait_pod mysql-client
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + local pod=mysql-client
logger.go:42: 09:05:48 | gr-self-healing/3-write-data | + set +o xtrace
logger.go:42: 09:05:49 | gr-self-healing/3-write-data | mysql-clienttrue
logger.go:42: 09:05:49 | gr-self-healing/3-write-data | + kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password'
logger.go:42: 09:05:49 | gr-self-healing/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 09:05:49 | gr-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
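The sed/grep pair above only scrubs the mysql client's command-line password warning out of the captured output. An alternative that avoids the warning at the source, since the client also reads the password from the MYSQL_PWD environment variable (sketch, not how the suite's run_mysql is actually written):

    kubectl -n "$NAMESPACE" exec mysql-client -- \
        env MYSQL_PWD=root_password mysql -sN -h gr-self-healing-router -P 6446 -uroot -e "SELECT 1"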
logger.go:42: 09:05:50 | gr-self-healing/3-write-data | + :
logger.go:42: 09:05:50 | gr-self-healing/3-write-data | + sleep 5
logger.go:42: 09:05:55 | gr-self-healing/3-write-data | test step completed 3-write-data
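Step 4 publishes the query result into a ConfigMap named 04-read-from-primary so that kuttl's declarative assert for the step can compare it against an expected object. The recorded value can also be checked by hand (sketch):

    kubectl -n "$NAMESPACE" get configmap 04-read-from-primary -o jsonpath='{.data.data}{"\n"}'
    # expect: 100500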
gr-self-healing/4-read-from-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | ++++ which gdate logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | ++++ which date logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ date=/usr/bin/date logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ oc get projects logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ : logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ kubectl get nodes logger.go:42: 09:05:55 | gr-self-healing/4-read-from-primary | +++ grep '^minikube' logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++++ get_cluster_name logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:05:56 | 
gr-self-healing/4-read-from-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | +++ local cluster=gr-self-healing logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | +++ echo gr-self-healing-router logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ local pod= logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | +++ get_client_pod logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ client_pod=mysql-client logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ wait_pod mysql-client logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ local pod=mysql-client logger.go:42: 09:05:56 | gr-self-healing/4-read-from-primary | ++ set +o xtrace logger.go:42: 09:05:57 | gr-self-healing/4-read-from-primary | mysql-clienttrue logger.go:42: 09:05:57 | gr-self-healing/4-read-from-primary | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:05:57 | gr-self-healing/4-read-from-primary | ++ sed -e 's/mysql: //' logger.go:42: 09:05:57 | gr-self-healing/4-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:05:58 | gr-self-healing/4-read-from-primary | + data=100500 logger.go:42: 09:05:58 | gr-self-healing/4-read-from-primary | + kubectl create configmap -n kuttl-test-creative-mastodon 04-read-from-primary --from-literal=data=100500 logger.go:42: 09:05:59 | gr-self-healing/4-read-from-primary | configmap/04-read-from-primary created logger.go:42: 09:05:59 | gr-self-healing/4-read-from-primary | test step completed 4-read-from-primary logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | starting test step 5-kill-primary logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_pod="$(get_primary_from_group_replication)" kill_pods "${NAMESPACE}" "pod" "$init_pod" "" "primary" sleep 10 # wait a bit for pod to be killed if [ "$init_pod" == "$(get_primary_from_group_replication)" ]; then echo "primary pod was not killed! something went wrong." exit 1 fi] logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | + source ../../functions logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ realpath ../../.. 
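[editor's sketch] The kill_pods call in this step builds and applies a Chaos Mesh PodChaos object; the log shows the yq edits but never prints the base manifest (e2e-tests/conf/chaos-pod-kill.yml). The sketch below shows the shape of what ends up applied, following the standard Chaos Mesh PodChaos schema; the mode field is an assumption, since it is not visible in this log.

    # Hedged sketch of the manifest kill_pods applies for the pod-kill case.
    kubectl apply --namespace "${NAMESPACE}" -f - <<EOF
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-kill-primary
    spec:
      action: pod-kill          # delete the pod once; the StatefulSet recreates it
      mode: one                 # assumed; not shown in this log
      selector:
        pods:
          ${NAMESPACE}:
          - gr-self-healing-mysql-0   # the primary found by the preceding query
    EOF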
logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++++ pwd logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++ test_name=gr-self-healing logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export GIT_BRANCH=PR-869 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ GIT_BRANCH=PR-869 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export VERSION=PR-869-ff26afb0 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ VERSION=PR-869-ff26afb0 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++++ which gdate logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | ++++ which date logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ date=/usr/bin/date logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ oc get projects logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ : logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ kubectl get nodes logger.go:42: 09:05:59 | gr-self-healing/5-kill-primary | +++ grep '^minikube' logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ get_primary_from_group_replication logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ cut -d. 
-f1 logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++++ get_cluster_name logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | +++ local cluster=gr-self-healing logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | +++ echo gr-self-healing-router logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ local pod= logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | +++ get_client_pod logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ client_pod=mysql-client logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ wait_pod mysql-client logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ local pod=mysql-client logger.go:42: 09:06:00 | gr-self-healing/5-kill-primary | ++ set +o xtrace logger.go:42: 09:06:01 | gr-self-healing/5-kill-primary | mysql-clienttrue logger.go:42: 09:06:01 | gr-self-healing/5-kill-primary | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:06:01 | gr-self-healing/5-kill-primary | ++ sed -e 's/mysql: //' logger.go:42: 09:06:01 | gr-self-healing/5-kill-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' 
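[editor's sketch] The primary-detection helper is fully visible in the trace above: it asks Group Replication itself for the current primary via performance_schema, then trims the headless-service suffix off the returned FQDN with cut. Pieced back together:

    # Sketch of get_primary_from_group_replication, as reconstructed from the trace.
    get_primary_from_group_replication() {
        run_mysql \
            "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';" \
            "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" \
            | cut -d. -f1   # gr-self-healing-mysql-0.gr-self-healing-mysql... -> gr-self-healing-mysql-0
    }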
logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + init_pod=gr-self-healing-mysql-0 logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + kill_pods kuttl-test-creative-mastodon pod gr-self-healing-mysql-0 '' primary logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + local ns=kuttl-test-creative-mastodon logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + local selector=pod logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + local pod_label=gr-self-healing-mysql-0 logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + local label_value= logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + local chaos_suffix=primary logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + '[' pod == pod ']' logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + yq eval ' logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | .metadata.name = "chaos-pod-kill-primary" | logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | del(.spec.selector.pods.test-namespace) | logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | .spec.selector.pods.kuttl-test-creative-mastodon[0] = "gr-self-healing-mysql-0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/chaos-pod-kill.yml logger.go:42: 09:06:02 | gr-self-healing/5-kill-primary | + kubectl apply --namespace kuttl-test-creative-mastodon -f - logger.go:42: 09:06:04 | gr-self-healing/5-kill-primary | podchaos.chaos-mesh.org/chaos-pod-kill-primary created logger.go:42: 09:06:04 | gr-self-healing/5-kill-primary | + sleep 5 logger.go:42: 09:06:09 | gr-self-healing/5-kill-primary | + sleep 10 logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++ get_primary_from_group_replication logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++ cut -d. 
-f1 logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++++ get_cluster_name logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | +++ local cluster=gr-self-healing logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | +++ echo gr-self-healing-router logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | ++ local pod= logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | +++ get_client_pod logger.go:42: 09:06:19 | gr-self-healing/5-kill-primary | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ client_pod=mysql-client logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ wait_pod mysql-client logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ local pod=mysql-client logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ set +o xtrace logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | mysql-clienttrue logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ sed -e 's/mysql: //' logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:06:20 | gr-self-healing/5-kill-primary | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:06:22 | gr-self-healing/5-kill-primary | + '[' gr-self-healing-mysql-0 == gr-self-healing-mysql-2 ']' logger.go:42: 09:07:19 | gr-self-healing/5-kill-primary | test step completed 5-kill-primary logger.go:42: 09:07:19 | gr-self-healing/6-write-data | starting test step 6-write-data logger.go:42: 09:07:19 | gr-self-healing/6-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100501)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 09:07:19 | gr-self-healing/6-write-data | + source ../../functions logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ realpath ../../.. 
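[editor's note] The comparison logged above ('[' gr-self-healing-mysql-0 == gr-self-healing-mysql-2 ']') evaluates false, meaning the group elected gr-self-healing-mysql-2 as the new primary after mysql-0 was killed, so the step passes. Note that every statement in this test goes through MySQL Router on port 6446, the conventional read-write port, which is why writes keep landing on whichever member is currently primary even immediately after a failover. A read-only comparison would target Router's read/write-split port instead (6447 by convention; not exercised in this log):

    run_mysql "SELECT @@hostname" "-h gr-self-healing-router -P 6446 -uroot -proot_password"  # current primary
    run_mysql "SELECT @@hostname" "-h gr-self-healing-router -P 6447 -uroot -proot_password"  # a secondary (assumed port)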
logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++++ pwd logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ test_name=gr-self-healing logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export GIT_BRANCH=PR-869 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ GIT_BRANCH=PR-869 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export VERSION=PR-869-ff26afb0 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ VERSION=PR-869-ff26afb0 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router 
logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++++ which gdate logger.go:42: 09:07:19 | gr-self-healing/6-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++++ which date logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ date=/usr/bin/date logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ oc get projects logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ : logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ kubectl get nodes logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ grep '^minikube' logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ get_cluster_name logger.go:42: 09:07:19 | gr-self-healing/6-write-data | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ local cluster=gr-self-healing logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ echo gr-self-healing-router logger.go:42: 09:07:19 | gr-self-healing/6-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100501)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:07:19 | gr-self-healing/6-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100501)' logger.go:42: 09:07:19 | gr-self-healing/6-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:07:19 | gr-self-healing/6-write-data | + local pod= logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ get_client_pod logger.go:42: 09:07:19 | gr-self-healing/6-write-data | ++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:07:20 | gr-self-healing/6-write-data | + client_pod=mysql-client logger.go:42: 09:07:20 | gr-self-healing/6-write-data | + wait_pod mysql-client logger.go:42: 09:07:20 | gr-self-healing/6-write-data | + local pod=mysql-client logger.go:42: 09:07:20 | 
gr-self-healing/6-write-data | + set +o xtrace logger.go:42: 09:07:20 | gr-self-healing/6-write-data | mysql-clienttrue logger.go:42: 09:07:20 | gr-self-healing/6-write-data | + kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100501)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:07:20 | gr-self-healing/6-write-data | + sed -e 's/mysql: //' logger.go:42: 09:07:20 | gr-self-healing/6-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:07:22 | gr-self-healing/6-write-data | + : logger.go:42: 09:07:22 | gr-self-healing/6-write-data | + sleep 5 logger.go:42: 09:07:27 | gr-self-healing/6-write-data | test step completed 6-write-data logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | starting test step 7-read-from-replicas logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 07-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | + source ../../functions logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ realpath ../../.. logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++++ pwd logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++ test_name=gr-self-healing logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:07:27 | 
gr-self-healing/7-read-from-replicas | +++ export GIT_BRANCH=PR-869 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ GIT_BRANCH=PR-869 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export VERSION=PR-869-ff26afb0 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ VERSION=PR-869-ff26afb0 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++++ which gdate logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | ++++ which date logger.go:42: 09:07:27 | 
gr-self-healing/7-read-from-replicas | +++ date=/usr/bin/date logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ oc get projects logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ : logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ kubectl get nodes logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ grep '^minikube' logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ get_cluster_name logger.go:42: 09:07:27 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ local index=0 logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ local pod= logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | +++ get_client_pod logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ set +o xtrace logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | mysql-clienttrue logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
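[editor's sketch] This step bypasses the router entirely and reads from each group member directly. The echoed values above show what get_mysql_headless_fqdn produces: each StatefulSet pod is addressable as <pod>.<headless-service> inside the namespace, so the loop can confirm that both rows replicated to every member.

    # Sketch of get_mysql_headless_fqdn, matching the echo output in the trace.
    get_mysql_headless_fqdn() {
        local cluster=$1
        local index=$2
        # <statefulset-pod>.<headless-service>, resolvable in-namespace
        echo "${cluster}-mysql-${index}.${cluster}-mysql"
    }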
logger.go:42: 09:07:28 | gr-self-healing/7-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | + data='100500 logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 07-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | configmap/07-read-from-replicas-0 created logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | +++ get_cluster_name logger.go:42: 09:07:30 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ local index=1 logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ local pod= logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | +++ get_client_pod logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ set +o xtrace logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | mysql-clienttrue logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:31 | gr-self-healing/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
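[editor's note] The SELECT returns two rows, so ${data} is a two-line string. kubectl create configmap accepts the embedded newline in --from-literal as long as the value stays quoted, which is why the trace shows the argument spanning two log lines:

    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password")
    kubectl create configmap -n "${NAMESPACE}" "07-read-from-replicas-${i}" \
        --from-literal=data="${data}"   # value is "100500\n100501"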
logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | + data='100500 logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 07-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | configmap/07-read-from-replicas-1 created logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | +++ get_cluster_name logger.go:42: 09:07:33 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ local index=2 logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ local pod= logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | +++ get_client_pod logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ set +o xtrace logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | mysql-clienttrue logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:07:34 | gr-self-healing/7-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:07:36 | gr-self-healing/7-read-from-replicas | + data='100500 logger.go:42: 09:07:36 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 09:07:36 | gr-self-healing/7-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 07-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 09:07:36 | gr-self-healing/7-read-from-replicas | 100501' logger.go:42: 09:07:36 | gr-self-healing/7-read-from-replicas | configmap/07-read-from-replicas-2 created logger.go:42: 09:07:37 | gr-self-healing/7-read-from-replicas | test step completed 7-read-from-replicas logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | starting test step 8-failure-primary logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions failure_pod "${NAMESPACE}" "$(get_primary_from_group_replication)" "primary" sleep 10 # wait a bit for pod to be killed] logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | + source ../../functions logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ realpath ../../.. logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++++ pwd logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++ test_name=gr-self-healing logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export GIT_BRANCH=PR-869 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ 
GIT_BRANCH=PR-869 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export VERSION=PR-869-ff26afb0 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ VERSION=PR-869-ff26afb0 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++++ which gdate logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | ++++ which date logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ date=/usr/bin/date logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ oc get projects logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ : logger.go:42: 
09:07:37 | gr-self-healing/8-failure-primary | +++ kubectl get nodes logger.go:42: 09:07:37 | gr-self-healing/8-failure-primary | +++ grep '^minikube' logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ get_primary_from_group_replication logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ cut -d. -f1 logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++++ get_cluster_name logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | +++ local cluster=gr-self-healing logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | +++ echo gr-self-healing-router logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ local pod= logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | +++ get_client_pod logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ client_pod=mysql-client logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ wait_pod mysql-client logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ local pod=mysql-client logger.go:42: 09:07:38 | gr-self-healing/8-failure-primary | ++ set +o xtrace logger.go:42: 09:07:39 | gr-self-healing/8-failure-primary | mysql-clienttrue logger.go:42: 09:07:39 | gr-self-healing/8-failure-primary | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:07:39 | gr-self-healing/8-failure-primary | ++ sed -e 's/mysql: //' logger.go:42: 09:07:39 | gr-self-healing/8-failure-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | + failure_pod kuttl-test-creative-mastodon gr-self-healing-mysql-2 primary logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | + local ns=kuttl-test-creative-mastodon logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | + local pod=gr-self-healing-mysql-2 logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | + local chaos_suffix=primary logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | + yq eval ' logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | .metadata.name = "chaos-pod-failure-primary" | logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | del(.spec.selector.pods.test-namespace) | logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | .spec.selector.pods.kuttl-test-creative-mastodon[0] = "gr-self-healing-mysql-2"' /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/chaos-pod-failure.yml logger.go:42: 09:07:40 | gr-self-healing/8-failure-primary | + kubectl apply --namespace kuttl-test-creative-mastodon -f - logger.go:42: 09:07:42 | gr-self-healing/8-failure-primary | podchaos.chaos-mesh.org/chaos-pod-failure-primary created logger.go:42: 09:07:42 | gr-self-healing/8-failure-primary | + sleep 5 logger.go:42: 09:07:47 | gr-self-healing/8-failure-primary | + sleep 10 logger.go:42: 09:09:59 | gr-self-healing/8-failure-primary | test step completed 8-failure-primary logger.go:42: 09:09:59 | gr-self-healing/9-write-data | starting test step 9-write-data logger.go:42: 09:09:59 | gr-self-healing/9-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100502)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 09:09:59 | gr-self-healing/9-write-data | + source ../../functions logger.go:42: 09:09:59 | gr-self-healing/9-write-data | +++ realpath ../../.. 
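[editor's sketch] Step 8 uses failure_pod rather than kill_pods: Chaos Mesh's pod-failure action keeps the pod object but makes its containers unavailable for a period, simulating a hung primary instead of a deleted one, which is why the step takes roughly two minutes (chaos applied at 09:07:42, step completed at 09:09:59). The base manifest (e2e-tests/conf/chaos-pod-failure.yml) is not printed in this log, so the duration and mode below are illustrative.

    # Hedged sketch of the pod-failure experiment applied by failure_pod.
    kubectl apply --namespace "${NAMESPACE}" -f - <<EOF
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-failure-primary
    spec:
      action: pod-failure
      mode: one                  # assumed; not visible in this log
      duration: 60s              # assumed; the real value lives in the conf file
      selector:
        pods:
          ${NAMESPACE}:
          - gr-self-healing-mysql-2   # the primary elected after step 5
    EOF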
logger.go:42: 09:09:59 | gr-self-healing/9-write-data | +++ get_cluster_name logger.go:42: 09:09:59 | gr-self-healing/9-write-data | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 09:10:00 | gr-self-healing/9-write-data | ++ local cluster=gr-self-healing logger.go:42: 09:10:00 | gr-self-healing/9-write-data | ++ echo gr-self-healing-router logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100502)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100502)' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + local pod= logger.go:42: 09:10:00 | gr-self-healing/9-write-data | ++ get_client_pod logger.go:42: 09:10:00 | gr-self-healing/9-write-data | ++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + client_pod=mysql-client logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + wait_pod mysql-client logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + local pod=mysql-client logger.go:42: 09:10:00 |
gr-self-healing/9-write-data | + set +o xtrace logger.go:42: 09:10:00 | gr-self-healing/9-write-data | mysql-clienttrue logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100502)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + sed -e 's/mysql: //' logger.go:42: 09:10:00 | gr-self-healing/9-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:10:02 | gr-self-healing/9-write-data | + : logger.go:42: 09:10:02 | gr-self-healing/9-write-data | + sleep 5 logger.go:42: 09:10:07 | gr-self-healing/9-write-data | test step completed 9-write-data logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | starting test step 10-read-from-replicas logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 10-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | + source ../../functions logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | +++ realpath ../../..
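[editor's note] The INSERT in step 9, like every other query in this test, goes through the run_mysql helper sourced from e2e-tests/functions. The helper's source is not part of this log, but the xtrace lets us reconstruct its shape: resolve the mysql-client pod deployed in step 0, pipe the statement into the mysql CLI inside it, and strip the password warning. A sketch under that assumption (NAMESPACE exported by the harness):

run_mysql() {
    local command="$1"
    local uri="$2"
    local client_pod
    # the long-lived client pod is labelled name=mysql-client
    client_pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.' \
        || :    # the trailing '+ :' in the trace suggests an empty result set is tolerated
}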
logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | +++ get_cluster_name logger.go:42: 09:10:07 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ local index=0 logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ local pod= logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | +++ get_client_pod logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ set +o xtrace logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | mysql-clienttrue logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:10:08 | gr-self-healing/10-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 09:10:10 | gr-self-healing/10-read-from-replicas | + data='100500 logger.go:42: 09:10:10 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 09:10:10 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 09:10:10 | gr-self-healing/10-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 10-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 09:10:10 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 09:10:10 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | configmap/10-read-from-replicas-0 created logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | +++ get_cluster_name logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ local index=1 logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ local pod= logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | +++ get_client_pod logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:10:11 | gr-self-healing/10-read-from-replicas | ++ set +o xtrace logger.go:42: 09:10:12 | gr-self-healing/10-read-from-replicas | mysql-clienttrue logger.go:42: 09:10:12 | gr-self-healing/10-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:10:12 | gr-self-healing/10-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
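[editor's note] Step 10 snapshots each replica's rows into a per-replica ConfigMap so that a later assert can compare all three members. Because --from-literal keeps the newlines, the captured result can be read back verbatim, for example:

kubectl -n kuttl-test-creative-mastodon get configmap 10-read-from-replicas-0 \
    -o jsonpath='{.data.data}'
# 100500
# 100501
# 100502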
logger.go:42: 09:10:12 | gr-self-healing/10-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:13 | gr-self-healing/10-read-from-replicas | + data='100500 logger.go:42: 09:10:13 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 09:10:13 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 09:10:13 | gr-self-healing/10-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 10-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 09:10:13 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 09:10:13 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | configmap/10-read-from-replicas-1 created logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | +++ get_cluster_name logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ local index=2 logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ local pod= logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | +++ get_client_pod logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:10:14 | gr-self-healing/10-read-from-replicas | ++ set +o xtrace logger.go:42: 09:10:15 | gr-self-healing/10-read-from-replicas | mysql-clienttrue logger.go:42: 09:10:15 | gr-self-healing/10-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:10:15 | gr-self-healing/10-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:10:15 | gr-self-healing/10-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:10:16 | gr-self-healing/10-read-from-replicas | + data='100500 logger.go:42: 09:10:16 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 09:10:16 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 09:10:16 | gr-self-healing/10-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 10-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 09:10:16 | gr-self-healing/10-read-from-replicas | 100501 logger.go:42: 09:10:16 | gr-self-healing/10-read-from-replicas | 100502' logger.go:42: 09:10:17 | gr-self-healing/10-read-from-replicas | configmap/10-read-from-replicas-2 created logger.go:42: 09:10:17 | gr-self-healing/10-read-from-replicas | test step completed 10-read-from-replicas logger.go:42: 09:10:17 | gr-self-healing/11-network-loss-primary | starting test step 11-network-loss-primary logger.go:42: 09:10:17 | gr-self-healing/11-network-loss-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions network_loss "${NAMESPACE}" "$(get_primary_from_group_replication)" "primary" sleep 30 # wait for new master to get elected] logger.go:42: 09:10:17 | gr-self-healing/11-network-loss-primary | + source ../../functions logger.go:42: 09:10:17 | gr-self-healing/11-network-loss-primary | +++ realpath ../../..
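[editor's note] kuttl marks step 10 completed only once the objects in the step's assert file exist and match the live cluster. That file is not reproduced in this log; by kuttl convention it would be e2e-tests/tests/gr-self-healing/10-assert.yaml and, for the ConfigMaps created above, plausibly contain one block per replica along these lines:

# hypothetical 10-assert.yaml (reconstructed, not taken from the repo)
apiVersion: v1
kind: ConfigMap
metadata:
  name: 10-read-from-replicas-0
data:
  data: |-
    100500
    100501
    100502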
logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++ get_primary_from_group_replication logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++ cut -d. -f1 logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++++ get_cluster_name logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | +++ local cluster=gr-self-healing logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | +++ echo gr-self-healing-router logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | ++ local pod= logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | +++ get_client_pod logger.go:42: 09:10:18 | gr-self-healing/11-network-loss-primary | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ client_pod=mysql-client logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ wait_pod mysql-client logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ local pod=mysql-client logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ set +o xtrace logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | mysql-clienttrue logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ sed -e 's/mysql: //' logger.go:42: 09:10:19 | gr-self-healing/11-network-loss-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
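[editor's note] get_primary_from_group_replication resolves the current primary by querying performance_schema through the router's read-write port and cutting the returned FQDN down to the pod name (gr-self-healing-mysql-0 below, after the failover in step 8). The same table also exposes member state, which is useful when following these failover steps by hand; credentials as in the test's own trace:

kubectl -n kuttl-test-creative-mastodon exec mysql-client -- \
    mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password \
    -e "SELECT MEMBER_HOST, MEMBER_STATE, MEMBER_ROLE FROM performance_schema.replication_group_members"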
logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | + network_loss kuttl-test-creative-mastodon gr-self-healing-mysql-0 primary logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | + local ns=kuttl-test-creative-mastodon logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | + local pod=gr-self-healing-mysql-0 logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | + local chaos_suffix=primary logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | + yq eval ' logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | .metadata.name = "chaos-pod-network-loss-primary" | logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | del(.spec.selector.pods.test-namespace) | logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | .spec.selector.pods.kuttl-test-creative-mastodon[0] = "gr-self-healing-mysql-0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/chaos-network-loss.yml logger.go:42: 09:10:21 | gr-self-healing/11-network-loss-primary | + kubectl apply --namespace kuttl-test-creative-mastodon -f - logger.go:42: 09:10:22 | gr-self-healing/11-network-loss-primary | networkchaos.chaos-mesh.org/chaos-pod-network-loss-primary created logger.go:42: 09:10:22 | gr-self-healing/11-network-loss-primary | + sleep 5 logger.go:42: 09:10:27 | gr-self-healing/11-network-loss-primary | + sleep 30 logger.go:42: 09:12:03 | gr-self-healing/11-network-loss-primary | test step completed 11-network-loss-primary logger.go:42: 09:12:03 | gr-self-healing/12-write-data | starting test step 12-write-data logger.go:42: 09:12:03 | gr-self-healing/12-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100503)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 09:12:03 | gr-self-healing/12-write-data | + source ../../functions logger.go:42: 09:12:03 | gr-self-healing/12-write-data | +++ realpath ../../.. 
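[editor's note] Step 11 isolates the newly elected primary by patching e2e-tests/conf/chaos-network-loss.yml the same way as in step 8. Again the conf file is not shown in the log, so the loss parameters and duration below are assumptions; only the object name and pod selector are taken from the yq expressions in the trace.

cat <<'EOF' | kubectl apply --namespace kuttl-test-creative-mastodon -f -
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-pod-network-loss-primary
spec:
  action: loss
  mode: one
  loss:
    loss: "100"        # assumed: drop every packet to fully isolate the pod
    correlation: "100" # assumed
  duration: "60s"      # assumed; the real value lives in chaos-network-loss.yml
  selector:
    pods:
      kuttl-test-creative-mastodon:
        - gr-self-healing-mysql-0
EOF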
logger.go:42: 09:12:03 | gr-self-healing/12-write-data | +++ get_cluster_name logger.go:42: 09:12:03 | gr-self-healing/12-write-data | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 09:12:04 | gr-self-healing/12-write-data | ++ local cluster=gr-self-healing logger.go:42: 09:12:04 | gr-self-healing/12-write-data | ++ echo gr-self-healing-router logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100503)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100503)' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + local pod= logger.go:42: 09:12:04 | gr-self-healing/12-write-data | ++ get_client_pod logger.go:42: 09:12:04 | gr-self-healing/12-write-data | ++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + client_pod=mysql-client logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + wait_pod mysql-client logger.go:42: 09:12:04 |
gr-self-healing/12-write-data | + local pod=mysql-client logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + set +o xtrace logger.go:42: 09:12:04 | gr-self-healing/12-write-data | mysql-clienttrue logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100503)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + sed -e 's/mysql: //' logger.go:42: 09:12:04 | gr-self-healing/12-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:12:06 | gr-self-healing/12-write-data | + : logger.go:42: 09:12:06 | gr-self-healing/12-write-data | + sleep 5 logger.go:42: 09:12:11 | gr-self-healing/12-write-data | test step completed 12-write-data logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | starting test step 13-read-from-replicas logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 13-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | + source ../../functions logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | +++ realpath ../../..
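[editor's note] All writes in this test target port 6446 on the gr-self-healing-router service. Assuming the operator keeps MySQL Router's standard bootstrap port layout, 6446 is the read-write port (always routed to whichever member currently holds the PRIMARY role) and 6447 the read-only port (balanced across secondaries), which is why the INSERT in step 12 succeeds even while the old primary is still network-isolated:

# read-write: follows the group's primary across failovers
mysql -h gr-self-healing-router -P 6446 -uroot -proot_password -e "INSERT myDB.myTable (id) VALUES (100503)"
# read-only: load-balanced over SECONDARY members
mysql -h gr-self-healing-router -P 6447 -uroot -proot_password -e "SELECT * FROM myDB.myTable"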
logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | +++ get_cluster_name logger.go:42: 09:12:11 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ local index=0 logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ local pod= logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | +++ get_client_pod logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:12:12 | gr-self-healing/13-read-from-replicas | ++ set +o xtrace logger.go:42: 09:12:13 | gr-self-healing/13-read-from-replicas | mysql-clienttrue logger.go:42: 09:12:13 | gr-self-healing/13-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:13 | gr-self-healing/13-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:12:13 | gr-self-healing/13-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.'
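[editor's note] Unlike the writes, these replica reads bypass the router: get_mysql_headless_fqdn builds per-pod DNS names of the form <pod>.<service> (e.g. gr-self-healing-mysql-0.gr-self-healing-mysql), which resolve directly to pod IPs because the mysql Service is headless. Assuming the client image ships getent, this can be verified with:

kubectl -n kuttl-test-creative-mastodon exec mysql-client -- \
    getent hosts gr-self-healing-mysql-0.gr-self-healing-mysql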
logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | + data='100500 logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 13-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 09:12:14 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | configmap/13-read-from-replicas-0 created logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | +++ get_cluster_name logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ local index=1 logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ local pod= logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | +++ get_client_pod logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:12:15 | gr-self-healing/13-read-from-replicas | ++ set +o xtrace logger.go:42: 09:12:16 | gr-self-healing/13-read-from-replicas | mysql-clienttrue logger.go:42: 09:12:16 | gr-self-healing/13-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:16 | gr-self-healing/13-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:12:16 | gr-self-healing/13-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | + data='100500 logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 13-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 09:12:17 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | configmap/13-read-from-replicas-1 created logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | +++ get_cluster_name logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ local index=2 logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ local pod= logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | +++ get_client_pod logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:12:18 | gr-self-healing/13-read-from-replicas | ++ set +o xtrace logger.go:42: 09:12:19 | gr-self-healing/13-read-from-replicas | mysql-clienttrue logger.go:42: 09:12:19 | gr-self-healing/13-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:12:19 | gr-self-healing/13-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:12:19 | gr-self-healing/13-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | + data='100500 logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 13-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | 100501 logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | 100502 logger.go:42: 09:12:20 | gr-self-healing/13-read-from-replicas | 100503' logger.go:42: 09:12:21 | gr-self-healing/13-read-from-replicas | configmap/13-read-from-replicas-2 created logger.go:42: 09:12:22 | gr-self-healing/13-read-from-replicas | test step completed 13-read-from-replicas logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | starting test step 14-cluster-crash logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | running command: [sh -c set -o errexit set -o xtrace source ../../functions kill_pods "${NAMESPACE}" "label" "app.kubernetes.io/instance" "gr-self-healing" "cluster-crash" sleep 30 # wait for crash] logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + source ../../functions logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | +++ realpath ../../..
GIT_BRANCH=PR-869 [… remaining repeated vars.sh exports (VERSION, IMAGE*, PMM_*, CERT_MANAGER_VER) and gdate/date/oc detection omitted; identical to the 0-deploy-operator step …] logger.go:42: 09:12:22 |
gr-self-healing/14-cluster-crash | +++ kubectl get nodes logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | +++ grep '^minikube' logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + kill_pods kuttl-test-creative-mastodon label app.kubernetes.io/instance gr-self-healing cluster-crash logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + local ns=kuttl-test-creative-mastodon logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + local selector=label logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + local pod_label=app.kubernetes.io/instance logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + local label_value=gr-self-healing logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + local chaos_suffix=cluster-crash logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + '[' label == pod ']' logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + '[' label == label ']' logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + yq eval ' logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | .metadata.name = "chaos-kill-label-cluster-crash" | logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | .spec.mode = "all" | logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | del(.spec.selector.pods) | logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | .spec.selector.labelSelectors."app.kubernetes.io/instance" = "gr-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf/chaos-pod-kill.yml logger.go:42: 09:12:22 | gr-self-healing/14-cluster-crash | + kubectl apply --namespace kuttl-test-creative-mastodon -f - logger.go:42: 09:12:23 | gr-self-healing/14-cluster-crash | podchaos.chaos-mesh.org/chaos-kill-label-cluster-crash created logger.go:42: 09:12:23 | gr-self-healing/14-cluster-crash | + sleep 5 logger.go:42: 09:12:28 | gr-self-healing/14-cluster-crash | + sleep 30 logger.go:42: 09:17:05 | gr-self-healing/14-cluster-crash | test step completed 14-cluster-crash logger.go:42: 09:17:05 | gr-self-healing/15-write-data | starting test step 15-write-data logger.go:42: 09:17:05 | gr-self-healing/15-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100504)" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" sleep 5] logger.go:42: 09:17:05 | gr-self-healing/15-write-data | + source ../../functions logger.go:42: 09:17:05 | gr-self-healing/15-write-data | +++ realpath ../../.. 
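
The kill_pods call above patches the shared conf/chaos-pod-kill.yml with yq and applies the result. Assuming the base manifest is a standard Chaos Mesh pod-kill definition (the action field is inferred from the file name, it is not shown in this log), the applied object is equivalent to this sketch:

    # Sketch of the PodChaos object step 14 applies: kill every pod
    # carrying the cluster's instance label, all at once.
    kubectl apply --namespace "${NAMESPACE}" -f - <<EOF
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-kill-label-cluster-crash
    spec:
      action: pod-kill
      mode: all
      selector:
        labelSelectors:
          app.kubernetes.io/instance: gr-self-healing
    EOF

Note the timestamps: the chaos object was applied at 09:12:23, but the step's asserts did not pass until 09:17:05, so the operator needed roughly four and a half minutes to bring the fully killed cluster back to ready.
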
logger.go:42: 09:17:05 | gr-self-healing/15-write-data | [… repeated vars.sh exports and gdate/date/oc/minikube detection omitted; identical to earlier steps …] logger.go:42: 09:17:06 | gr-self-healing/15-write-data | +++ get_cluster_name logger.go:42: 09:17:06 | gr-self-healing/15-write-data | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:17:06 | gr-self-healing/15-write-data | ++ get_mysql_router_service gr-self-healing logger.go:42: 09:17:06 | gr-self-healing/15-write-data | ++ local cluster=gr-self-healing logger.go:42: 09:17:06 | gr-self-healing/15-write-data | ++ echo gr-self-healing-router logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100504)' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100504)' logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + local pod= logger.go:42: 09:17:06 | gr-self-healing/15-write-data | ++ get_client_pod logger.go:42: 09:17:06 | gr-self-healing/15-write-data | ++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + client_pod=mysql-client logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + wait_pod mysql-client logger.go:42: 09:17:06 |
gr-self-healing/15-write-data | + local pod=mysql-client logger.go:42: 09:17:06 | gr-self-healing/15-write-data | + set +o xtrace logger.go:42: 09:17:07 | gr-self-healing/15-write-data | mysql-clienttrue logger.go:42: 09:17:07 | gr-self-healing/15-write-data | + kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100504)" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:07 | gr-self-healing/15-write-data | + sed -e 's/mysql: //' logger.go:42: 09:17:07 | gr-self-healing/15-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:17:08 | gr-self-healing/15-write-data | + : logger.go:42: 09:17:08 | gr-self-healing/15-write-data | + sleep 5 logger.go:42: 09:17:13 | gr-self-healing/15-write-data | test step completed 15-write-data logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | starting test step 16-read-from-replicas logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 16-read-from-replicas-${i} --from-literal=data="${data}" done] logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | + source ../../functions logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ realpath ../../.. logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | ++++ pwd logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | ++ test_name=gr-self-healing logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:17:13 | 
gr-self-healing/16-read-from-replicas | [… repeated git/vars.sh exports omitted; identical to earlier steps …] logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | ++++ which gdate logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | which: no gdate in
(/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | ++++ which date logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ date=/usr/bin/date logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ oc get projects logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ : logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ kubectl get nodes logger.go:42: 09:17:13 | gr-self-healing/16-read-from-replicas | +++ grep '^minikube' logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | +++ get_cluster_name logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 0 logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ local index=0 logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ echo gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | + host=gr-self-healing-mysql-0.gr-self-healing-mysql logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | ++ local pod= logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | +++ get_client_pod logger.go:42: 09:17:14 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ set +o xtrace logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | mysql-clienttrue logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-0.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:17:15 | gr-self-healing/16-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | + data='100500 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 16-read-from-replicas-0 '--from-literal=data=100500 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | configmap/16-read-from-replicas-0 created logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | +++ get_cluster_name logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 1 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ local index=1 logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ echo gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | + host=gr-self-healing-mysql-1.gr-self-healing-mysql logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | ++ local pod= logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | +++ get_client_pod logger.go:42: 09:17:17 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ set +o xtrace logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | mysql-clienttrue logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-1.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:17:18 | gr-self-healing/16-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | + data='100500 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 16-read-from-replicas-1 '--from-literal=data=100500 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | configmap/16-read-from-replicas-1 created logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | + for i in 0 1 2 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | +++ get_cluster_name logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ get_mysql_headless_fqdn gr-self-healing 2 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ local cluster=gr-self-healing logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ local index=2 logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ echo gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | + host=gr-self-healing-mysql-2.gr-self-healing-mysql logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ local 'uri=-h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | ++ local pod= logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | +++ get_client_pod logger.go:42: 09:17:20 | gr-self-healing/16-read-from-replicas | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ local pod=mysql-client logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ set +o xtrace logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | mysql-clienttrue logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-self-healing-mysql-2.gr-self-healing-mysql -uroot -proot_password' logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 09:17:21 | gr-self-healing/16-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
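
The ConfigMaps created in this loop exist so kuttl can assert on them: the step only completes once each 16-read-from-replicas-N object matches the step's assert file. The assert file itself is not part of this log; hypothetically, the check for replica 0 would have this shape (note the data now includes 100504, the row written after the full crash):

    # Hypothetical 16-assert.yaml fragment; kuttl waits until a ConfigMap
    # with exactly this data exists in the test namespace.
    cat <<'EOF' > 16-assert.yaml
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: 16-read-from-replicas-0
    data:
      data: |-
        100500
        100501
        100502
        100503
        100504
    EOF
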
logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | + data='100500 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | + kubectl create configmap -n kuttl-test-creative-mastodon 16-read-from-replicas-2 '--from-literal=data=100500 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100501 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100502 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100503 logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | 100504' logger.go:42: 09:17:23 | gr-self-healing/16-read-from-replicas | configmap/16-read-from-replicas-2 created logger.go:42: 09:17:24 | gr-self-healing/16-read-from-replicas | test step completed 16-read-from-replicas logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | starting test step 17-quorum-loss logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | running command: [sh -c set -o errexit set -o xtrace source ../../functions primary=$(get_primary_from_group_replication) a_replica=$(run_mysql \ "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='SECONDARY' LIMIT 1;" \ "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" | cut -d'.' -f1) kubectl -n ${NAMESPACE} delete pod ${primary} ${a_replica} --force --grace-period=0] logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | + source ../../functions logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | +++ realpath ../../.. 
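
Step 17 engineers a quorum loss deliberately: it looks up the current primary and one secondary through the router's read-write port 6446, then force-deletes both pods, leaving one member of three, below the group-replication majority. The two lookups, essentially as traced below, with get_primary_from_group_replication expanded to the query its trace shows (cut -d'.' -f1 trims the FQDN in MEMBER_HOST down to the pod name that kubectl delete expects):

    # Membership lookups used by step 17 (from the step's command text).
    primary=$(run_mysql \
        "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';" \
        "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" \
        | cut -d'.' -f1)
    a_replica=$(run_mysql \
        "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='SECONDARY' LIMIT 1;" \
        "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" \
        | cut -d'.' -f1)
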
logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | [… repeated vars.sh exports and gdate/date/oc/minikube detection omitted; identical to earlier steps …] logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | ++ get_primary_from_group_replication logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | ++ cut -d.
-f1 logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | ++++ get_cluster_name logger.go:42: 09:17:24 | gr-self-healing/17-quorum-loss | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | +++ local cluster=gr-self-healing logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | +++ echo gr-self-healing-router logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ local pod= logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | +++ get_client_pod logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ client_pod=mysql-client logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ wait_pod mysql-client logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ local pod=mysql-client logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ set +o xtrace logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | mysql-clienttrue logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='\''PRIMARY'\'';" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ sed -e 's/mysql: //' logger.go:42: 09:17:25 | gr-self-healing/17-quorum-loss | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | + primary=gr-self-healing-mysql-0 logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++ cut -d. 
-f1 logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++++ get_cluster_name logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++++ kubectl -n kuttl-test-creative-mastodon get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | +++ get_mysql_router_service gr-self-healing logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | +++ local cluster=gr-self-healing logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | +++ echo gr-self-healing-router logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++ run_mysql 'SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='\''SECONDARY'\'' LIMIT 1;' '-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++ local 'command=SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='\''SECONDARY'\'' LIMIT 1;' logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++ local 'uri=-h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | ++ local pod= logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | +++ get_client_pod logger.go:42: 09:17:27 | gr-self-healing/17-quorum-loss | +++ kubectl -n kuttl-test-creative-mastodon get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ client_pod=mysql-client logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ wait_pod mysql-client logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ local pod=mysql-client logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ set +o xtrace logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | mysql-clienttrue logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ kubectl -n kuttl-test-creative-mastodon exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='\''SECONDARY'\'' LIMIT 1;" | mysql -sN -h gr-self-healing-router -P 6446 -uroot -proot_password' logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ sed -e 's/mysql: //' logger.go:42: 09:17:28 | gr-self-healing/17-quorum-loss | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 09:17:30 | gr-self-healing/17-quorum-loss | + a_replica=gr-self-healing-mysql-2 logger.go:42: 09:17:30 | gr-self-healing/17-quorum-loss | + kubectl -n kuttl-test-creative-mastodon delete pod gr-self-healing-mysql-0 gr-self-healing-mysql-2 --force --grace-period=0 logger.go:42: 09:17:30 | gr-self-healing/17-quorum-loss | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
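
The warning is expected: --force --grace-period=0 removes the API objects immediately without waiting for the containers to stop. From here the step's asserts give the operator time to rebuild the group (it finished at 09:20:34, about three minutes after the delete). A hypothetical way to watch the rebuild by hand is to poll membership on the surviving member, gr-self-healing-mysql-1, since the router may be unable to route while quorum is lost:

    # Poll group membership on the surviving member until all three
    # members report ONLINE again (sketch; uses the helpers traced above).
    until run_mysql \
        "SELECT MEMBER_HOST, MEMBER_STATE FROM performance_schema.replication_group_members;" \
        "-h $(get_mysql_headless_fqdn $(get_cluster_name) 1) -uroot -proot_password" \
        | grep -c ONLINE | grep -qx 3; do
        sleep 5
    done
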
logger.go:42: 09:17:30 | gr-self-healing/17-quorum-loss | pod "gr-self-healing-mysql-0" force deleted logger.go:42: 09:17:30 | gr-self-healing/17-quorum-loss | pod "gr-self-healing-mysql-2" force deleted logger.go:42: 09:20:34 | gr-self-healing/17-quorum-loss | test step completed 17-quorum-loss logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | starting test step 97-destroy-chaos-mesh logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_chaos_mesh] logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | + source ../../functions logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ realpath ../../.. logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++++ pwd logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ test_name=gr-self-healing logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export GIT_BRANCH=PR-869 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ GIT_BRANCH=PR-869 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export VERSION=PR-869-ff26afb0 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ VERSION=PR-869-ff26afb0 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | +++ export 
IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql [… remaining repeated vars.sh exports and gdate/date/oc/minikube detection omitted; identical to earlier steps …] logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | + destroy_chaos_mesh logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ tail -n1
logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '-F ' '{print $2}' logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | ++ sed s/NAMESPACE// logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig logger.go:42: 09:20:34 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig logger.go:42: 09:20:36 | gr-self-healing/97-destroy-chaos-mesh | + local chaos_mesh_ns=kuttl-test-creative-mastodon logger.go:42: 09:20:36 | gr-self-healing/97-destroy-chaos-mesh | + '[' -n kuttl-test-creative-mastodon ']' logger.go:42: 09:20:36 | gr-self-healing/97-destroy-chaos-mesh | + helm uninstall --wait --timeout 60s chaos-mesh --namespace kuttl-test-creative-mastodon logger.go:42: 09:20:36 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig logger.go:42: 09:20:36 | gr-self-healing/97-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-869/kubeconfig logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | release "chaos-mesh" uninstalled logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 09:20:45 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | ++ grep validate-auth logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl api-resources logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 09:20:46 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' 
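
Two details of the teardown are worth noting. First, the release lookup at the top (helm list --filter chaos-mesh | tail -n1 | awk | sed s/NAMESPACE//) parses helm's table output, with the sed stripping the header row in case no release matched; here it resolves to kuttl-test-creative-mastodon, meaning chaos-mesh was installed into the test namespace itself. Second, the two "error: resource(s) were provided, but no name was specified" lines are benign: helm uninstall had already removed the chaos-mesh webhook configurations, so grep matched nothing and kubectl delete ran with no names; the trailing "+ :" no-op keeps errexit from aborting the step. A guarded variant that avoids the spurious error (a sketch, not the repo's code):

    # Delete chaos-mesh webhook configurations only if any still exist.
    names=$(kubectl get mutatingwebhookconfiguration -o name | grep chaos-mesh || true)
    if [ -n "${names}" ]; then
        # ${names} is intentionally unquoted: one argument per resource name.
        timeout 30 kubectl delete ${names}
    fi
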
logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:47 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:48 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces logger.go:42: 09:20:48 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:48 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:48 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:48 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:49 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces logger.go:42: 09:20:49 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:49 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:49 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:49 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:49 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:50 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:50 | 
gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:51 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete httpchaos --all --all-namespaces logger.go:42: 09:20:51 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:51 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:51 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:51 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:52 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces logger.go:42: 09:20:52 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:52 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:52 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:52 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:52 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:53 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | + local kind=NetworkChaos logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-pod-network-loss-primary logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'NetworkChaos chaos-pod-network-loss-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh 
| + local namespace=kuttl-test-creative-mastodon logger.go:42: 09:20:54 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch NetworkChaos chaos-pod-network-loss-primary -n kuttl-test-creative-mastodon --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | networkchaos.chaos-mesh.org/chaos-pod-network-loss-primary patched logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | networkchaos.chaos-mesh.org "chaos-pod-network-loss-primary" deleted logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:55 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:56 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces logger.go:42: 09:20:56 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:56 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:56 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:56 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:56 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-kill-label-cluster-crash kuttl-test-creative-mastodon' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-kill-label-cluster-crash kuttl-test-creative-mastodon' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-kill-label-cluster-crash logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-kill-label-cluster-crash kuttl-test-creative-mastodon' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-creative-mastodon logger.go:42: 09:20:57 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch PodChaos 
chaos-kill-label-cluster-crash -n kuttl-test-creative-mastodon --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-kill-label-cluster-crash patched logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-pod-failure-primary logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-failure-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-creative-mastodon logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-failure-primary -n kuttl-test-creative-mastodon --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-failure-primary patched logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + local kind=PodChaos logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $2}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + local name=chaos-pod-kill-primary logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ echo 'PodChaos chaos-pod-kill-primary kuttl-test-creative-mastodon' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $3}' logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + local namespace=kuttl-test-creative-mastodon logger.go:42: 09:20:58 | gr-self-healing/97-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-kill-primary -n kuttl-test-creative-mastodon --type=merge -p '{"metadata":{"finalizers":[]}}' logger.go:42: 09:20:59 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-kill-primary patched logger.go:42: 09:20:59 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:20:59 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces logger.go:42: 09:20:59 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-kill-label-cluster-crash" deleted logger.go:42: 09:20:59 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-failure-primary" deleted logger.go:42: 09:20:59 | gr-self-healing/97-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-kill-primary" deleted 
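The sweep traced here visits every chaos-mesh API kind, clears the finalizers on each remaining object (the patches on chaos-pod-network-loss-primary and the three PodChaos objects above), then bulk-deletes the kind. Clearing metadata.finalizers matters because the chaos-mesh controller was uninstalled first, so nothing is left to process the finalizer hooks and an unpatched delete could hang until its timeout. A sketch reconstructed from the xtrace; treat it as an approximation of the real helper functions, not their verbatim source:

    for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
        kubectl get "$i" --all-namespaces --no-headers \
            -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
            | while read -r line; do
                kind=$(echo "$line" | awk '{print $1}')
                name=$(echo "$line" | awk '{print $2}')
                namespace=$(echo "$line" | awk '{print $3}')
                # Emptying metadata.finalizers lets the object be deleted even
                # though no controller remains to run the finalizer.
                kubectl patch "$kind" "$name" -n "$namespace" \
                    --type=merge -p '{"metadata":{"finalizers":[]}}'
            done
        timeout 30 kubectl delete "$i" --all --all-namespaces || :
    done
    # The step then finishes with the same filter-and-delete idiom for the
    # chaos-mesh CRDs and RBAC objects, as the rest of the trace shows:
    timeout 30 kubectl delete crd \
        $(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
    timeout 30 kubectl delete clusterrolebinding \
        $(kubectl get clusterrolebinding | grep chaos-mesh | awk '{print $1}') || :
    timeout 30 kubectl delete clusterrole \
        $(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}') || :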
logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:00 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:01 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces logger.go:42: 09:21:01 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:01 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:01 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:01 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:02 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces logger.go:42: 09:21:02 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:02 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:02 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:02 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:02 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:03 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get statuschecks --all-namespaces --no-headers -o 
custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:04 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces logger.go:42: 09:21:04 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:04 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:04 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:04 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:04 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces logger.go:42: 09:21:05 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:05 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:05 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:05 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:05 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace logger.go:42: 09:21:06 | gr-self-healing/97-destroy-chaos-mesh | + read -r line logger.go:42: 09:21:07 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete workflows --all --all-namespaces logger.go:42: 09:21:07 | gr-self-healing/97-destroy-chaos-mesh | No resources found logger.go:42: 09:21:07 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get crd logger.go:42: 09:21:07 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh.org logger.go:42: 09:21:07 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:21:08 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org 
podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org logger.go:42: 09:21:08 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted logger.go:42: 09:21:08 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted logger.go:42: 09:21:09 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted logger.go:42: 09:21:09 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted logger.go:42: 09:21:09 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted logger.go:42: 09:21:09 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted logger.go:42: 09:21:10 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted logger.go:42: 09:21:10 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted logger.go:42: 09:21:10 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted logger.go:42: 09:21:11 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted logger.go:42: 09:21:11 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted logger.go:42: 09:21:12 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted logger.go:42: 09:21:12 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted logger.go:42: 09:21:12 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted logger.go:42: 09:21:12 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted logger.go:42: 09:21:12 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted logger.go:42: 09:21:12 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted logger.go:42: 09:21:13 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted logger.go:42: 09:21:13 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted logger.go:42: 09:21:13 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted logger.go:42: 09:21:13 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted logger.go:42: 09:21:15 | gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted logger.go:42: 09:21:16 | 
gr-self-healing/97-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted logger.go:42: 09:21:20 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get clusterrolebinding logger.go:42: 09:21:20 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 09:21:20 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:21:21 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding logger.go:42: 09:21:21 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 09:21:21 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 09:21:21 | gr-self-healing/97-destroy-chaos-mesh | ++ kubectl get clusterrole logger.go:42: 09:21:21 | gr-self-healing/97-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 09:21:21 | gr-self-healing/97-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 09:21:22 | gr-self-healing/97-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrole logger.go:42: 09:21:22 | gr-self-healing/97-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 09:21:22 | gr-self-healing/97-destroy-chaos-mesh | + : logger.go:42: 09:21:22 | gr-self-healing/97-destroy-chaos-mesh | test step completed 97-destroy-chaos-mesh logger.go:42: 09:21:22 | gr-self-healing/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 09:21:22 | gr-self-healing/98-drop-finalizer | PerconaServerMySQL:kuttl-test-creative-mastodon/gr-self-healing updated logger.go:42: 09:21:22 | gr-self-healing/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ realpath ../../.. 
logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/tests/gr-self-healing logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++ test_name=gr-self-healing logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/vars.sh logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-869 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/deploy logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-869/e2e-tests/conf logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/gr-self-healing logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-869 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-869 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export VERSION=PR-869-ff26afb0 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ VERSION=PR-869-ff26afb0 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-869-ff26afb0 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export 
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-869/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | ++++ which date logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ : logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 09:21:22 | gr-self-healing/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 09:21:23 | gr-self-healing/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 09:21:23 | gr-self-healing/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 09:21:23 | gr-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
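Per this step's trace, destroy_operator (named in the step's command above; its exact body is not shown here) reduces to two immediate deletions:

    kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    kubectl delete namespace ps-operator --force --grace-period=0

"--force --grace-period=0" removes the API objects without waiting for the pods to terminate, which is exactly what kubectl's "Immediate deletion does not wait..." warnings refer to. The "[[ -n ps-operator ]]" guard in the trace suggests the namespace deletion only runs when a dedicated operator namespace is configured. That trade-off is fine for a disposable e2e namespace but would be risky against a shared cluster.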
logger.go:42: 09:21:23 | gr-self-healing/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 09:21:24 | gr-self-healing/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 09:21:24 | gr-self-healing/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 09:21:24 | gr-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 09:21:24 | gr-self-healing/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 09:21:30 | gr-self-healing/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 09:21:30 | gr-self-healing | gr-self-healing events from ns kuttl-test-creative-mastodon: logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:00:53 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/mysql-client to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:00:53 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "percona/percona-server:8.0.33" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:14 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "percona/percona-server:8.0.33" in 20.178s (20.178s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:14 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:14 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-5f8b4885cf SuccessfulCreate Created pod: chaos-controller-manager-5f8b4885cf-8glbz replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal Deployment.apps chaos-controller-manager ScalingReplicaSet Scaled up replica set chaos-controller-manager-5f8b4885cf to 3 deployment-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal Pod chaos-daemon-5bqbc Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/chaos-daemon-5bqbc to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal Pod chaos-daemon-n6ts5 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/chaos-daemon-n6ts5 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-95l2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal Pod chaos-daemon-xdchq Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/chaos-daemon-xdchq to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-xdchq daemonset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-5bqbc 
daemonset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:32 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-n6ts5 daemonset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-625fs Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/chaos-controller-manager-5f8b4885cf-625fs to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-95l2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-625fs.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-8glbz Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/chaos-controller-manager-5f8b4885cf-8glbz to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-8glbz.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-9sg5n Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/chaos-controller-manager-5f8b4885cf-9sg5n to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-5f8b4885cf SuccessfulCreate Created pod: chaos-controller-manager-5f8b4885cf-625fs replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-5f8b4885cf SuccessfulCreate Created pod: chaos-controller-manager-5f8b4885cf-9sg5n replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Warning Pod chaos-daemon-5bqbc FailedMount MountVolume.SetUp failed for volume "chaos-daemon-cert" : failed to sync secret cache: timed out waiting for the condition kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-daemon-n6ts5.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:33 +0000 UTC Normal Pod chaos-daemon-xdchq.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:34 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-9sg5n.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:34 +0000 UTC Normal Pod chaos-daemon-5bqbc.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:35 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-8glbz.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 1.942s (1.942s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:35 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-8glbz.spec.containers{chaos-mesh} Created Created 
container: chaos-mesh kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:35 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-8glbz.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:35 +0000 UTC Normal ConfigMap chaos-mesh LeaderElection chaos-controller-manager-5f8b4885cf-8glbz_7c3f016f-d50e-44dc-aa5b-d35e7c0817d9 became leader logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:36 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-625fs.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 2.884s (2.884s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:36 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-625fs.spec.containers{chaos-mesh} Created Created container: chaos-mesh kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:36 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-625fs.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:36 +0000 UTC Normal Lease.coordination.k8s.io chaos-mesh LeaderElection chaos-controller-manager-5f8b4885cf-8glbz_7c3f016f-d50e-44dc-aa5b-d35e7c0817d9 became leader logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:37 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-9sg5n.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 3.191s (3.191s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:37 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-9sg5n.spec.containers{chaos-mesh} Created Created container: chaos-mesh kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:37 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-9sg5n.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:43 +0000 UTC Normal Pod chaos-daemon-n6ts5.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.55s (10.55s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:43 +0000 UTC Normal Pod chaos-daemon-n6ts5.spec.containers{chaos-daemon} Created Created container: chaos-daemon kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:43 +0000 UTC Normal Pod chaos-daemon-n6ts5.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:43 +0000 UTC Normal Pod chaos-daemon-xdchq.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.652s (10.652s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:43 +0000 UTC Normal Pod chaos-daemon-xdchq.spec.containers{chaos-daemon} Created Created container: chaos-daemon kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:43 +0000 UTC Normal Pod chaos-daemon-xdchq.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:44 +0000 UTC Normal Pod chaos-daemon-5bqbc.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 9.915s (9.915s including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:44 +0000 
UTC Normal Pod chaos-daemon-5bqbc.spec.containers{chaos-daemon} Created Created container: chaos-daemon kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:44 +0000 UTC Normal Pod chaos-daemon-5bqbc.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:49 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:49 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:49 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-creative-mastodon/datadir-gr-self-healing-mysql-0" pd.csi.storage.gke.io_gke-ca8bc98c0aca4a4293fc-e814-b2fe-vm_d46e66f9-b8ed-4323-8e7a-afcf67dcca28 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:49 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Claim datadir-gr-self-healing-mysql-0 Pod gr-self-healing-mysql-0 in StatefulSet gr-self-healing-mysql success statefulset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:49 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Pod gr-self-healing-mysql-0 in StatefulSet gr-self-healing-mysql successful statefulset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:53 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-dcb929a7-67e4-404f-9a31-64d7789fa44e pd.csi.storage.gke.io_gke-ca8bc98c0aca4a4293fc-e814-b2fe-vm_d46e66f9-b8ed-4323-8e7a-afcf67dcca28 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:01:53 +0000 UTC Normal Pod gr-self-healing-mysql-0 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-0 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:00 +0000 UTC Normal Pod gr-self-healing-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-dcb929a7-67e4-404f-9a31-64d7789fa44e" attachdetach-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:04 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:04 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 142ms (142ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:04 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:04 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet 
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 131ms (131ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 118ms (118ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:11 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:44 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:44 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-creative-mastodon/datadir-gr-self-healing-mysql-1" pd.csi.storage.gke.io_gke-ca8bc98c0aca4a4293fc-e814-b2fe-vm_d46e66f9-b8ed-4323-8e7a-afcf67dcca28 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:44 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:44 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Claim datadir-gr-self-healing-mysql-1 Pod gr-self-healing-mysql-1 in StatefulSet gr-self-healing-mysql success statefulset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:44 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Pod gr-self-healing-mysql-1 in StatefulSet gr-self-healing-mysql successful statefulset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:48 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-75f83107-6073-455a-a5b0-4766dee74cb3 pd.csi.storage.gke.io_gke-ca8bc98c0aca4a4293fc-e814-b2fe-vm_d46e66f9-b8ed-4323-8e7a-afcf67dcca28 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:48 +0000 UTC Normal Pod gr-self-healing-mysql-1 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-1 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-95l2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:56 +0000 UTC Normal Pod gr-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-75f83107-6073-455a-a5b0-4766dee74cb3" attachdetach-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:57 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:57 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 144ms (144ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:57 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:57 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 102ms (102ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 109ms (109ms including 
waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:02:59 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:03:30 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed:
2025/03/19 09:03:17 Bootstrap starting...
2025/03/19 09:03:17 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The clearReadOnly option is deprecated and will be removed in a future release.
WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster...
This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
applierWorkerThreads will be set to the default value of 4.
NOTE: Some configuration options need to be fixed:
+----------------------------------------+---------------+----------------+----------------------------+
| Variable                               | Current Value | Required Value | Note                       |
+----------------------------------------+---------------+----------------+----------------------------+
| binlog_transaction_dependency_tracking | COMMIT_ORDER  | WRITESET       | Update the server variable |
+----------------------------------------+---------------+----------------+----------------------------+
Disabled super_read_only on the instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'
Enabling super_read_only on the instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'
Configuring instance...
WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287).
The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' was configured to be used in an InnoDB cluster.
2025/03/19 09:03:17 Instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster
2025/03/19 09:03:17 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon]
2025/03/19 09:03:17 Running dba.getCluster('grselfhealing')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2025/03/19 09:03:17 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon
2025/03/19 09:03:18 Cluster status:
  ClusterName: grselfhealing
  Status: OK_NO_TOLERANCE
  StatusText: Cluster is NOT tolerant to any failures.
  SSL: REQUIRED
  Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
  Topology:
    Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: []
2025/03/19 09:03:18 Adding instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster
2025/03/19 09:03:18 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead.
NOTE: The target instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to decide whether incremental state recovery can correctly provision it. Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306...
This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
** Stage DROP DATA: Completed
** Clone Transfer
    FILE COPY  100%  Completed
    PAGE COPY  0%  In Progress
    REDO COPY  0%  Not Started
** Clone Transfer
    FILE COPY  100%  Completed
    PAGE COPY  100%  Completed
    REDO COPY  100%  Completed
NOTE: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down...
* Waiting for server restart... \
* Waiting for server restart... |
* Waiting for server restart...
/ kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:03:30 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:03:30 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:03:30 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 109ms (109ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:02 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:02 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:02 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-creative-mastodon/datadir-gr-self-healing-mysql-2" pd.csi.storage.gke.io_gke-ca8bc98c0aca4a4293fc-e814-b2fe-vm_d46e66f9-b8ed-4323-8e7a-afcf67dcca28 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:02 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Claim datadir-gr-self-healing-mysql-2 Pod gr-self-healing-mysql-2 in StatefulSet gr-self-healing-mysql success statefulset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:02 +0000 UTC Normal StatefulSet.apps gr-self-healing-mysql SuccessfulCreate create Pod gr-self-healing-mysql-2 in StatefulSet gr-self-healing-mysql successful statefulset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:05 +0000 UTC Normal PersistentVolumeClaim datadir-gr-self-healing-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-1e318172-5767-4cf4-ae6f-b3f012b2117d pd.csi.storage.gke.io_gke-ca8bc98c0aca4a4293fc-e814-b2fe-vm_d46e66f9-b8ed-4323-8e7a-afcf67dcca28 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:06 +0000 UTC Normal Pod gr-self-healing-mysql-2 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-2 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:13 +0000 UTC Normal Pod gr-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1e318172-5767-4cf4-ae6f-b3f012b2117d" attachdetach-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:15 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:15 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 171ms (171ms including waiting) kubelet logger.go:42: 
09:21:30 | gr-self-healing | 2025-03-19 09:04:15 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:15 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 102ms (102ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 128ms (128ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:17 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:48 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:04:35 Bootstrap starting... 2025/03/19 09:04:35 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 applierWorkerThreads will be set to the default value of 4. 
NOTE: Some configuration options need to be fixed:
+----------------------------------------+---------------+----------------+----------------------------+
| Variable                               | Current Value | Required Value | Note                       |
+----------------------------------------+---------------+----------------+----------------------------+
| binlog_transaction_dependency_tracking | COMMIT_ORDER  | WRITESET       | Update the server variable |
+----------------------------------------+---------------+----------------+----------------------------+
Disabled super_read_only on the instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' Enabling super_read_only on the instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' was configured to be used in an InnoDB cluster.
2025/03/19 09:04:35 Instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster
2025/03/19 09:04:35 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon]
2025/03/19 09:04:35 Running dba.getCluster('grselfhealing') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure.
2025/03/19 09:04:35 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon
2025/03/19 09:04:36 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: []
2025/03/19 09:04:36 Adding instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster
2025/03/19 09:04:36 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The target instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to decide whether incremental state recovery can correctly provision it. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Instance configuration is suitable.
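
The change flagged in the table is applied automatically by dba.configureInstance() during bootstrap; done by hand it would amount to a single SET PERSIST (sketch only, password placeholder assumed):

kubectl -n kuttl-test-creative-mastodon exec gr-self-healing-mysql-2 -c mysql -- \
  mysql -uoperator -p'<password>' \
    -e "SET PERSIST binlog_transaction_dependency_tracking = 'WRITESET'"

The variable itself is deprecated in recent 8.0 releases, which is why the shell also prints the Code 1287 warning above.
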
NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:48 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:48 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:04:48 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 145ms (145ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-5lgck to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-5lssx to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-95l2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-76rnp to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-9b57d7865 SuccessfulCreate Created pod: gr-self-healing-router-9b57d7865-5lgck replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-9b57d7865 SuccessfulCreate 
Created pod: gr-self-healing-router-9b57d7865-5lssx replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-9b57d7865 SuccessfulCreate Created pod: gr-self-healing-router-9b57d7865-76rnp replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:25 +0000 UTC Normal Deployment.apps gr-self-healing-router ScalingReplicaSet Scaled up replica set gr-self-healing-router-9b57d7865 to 3 deployment-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 176ms (176ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 179ms (179ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 179ms (179ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.containers{router} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:main-router" in 101ms (101ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.containers{router} Created Created container: router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 105ms (105ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.containers{router} Created Created container: router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.containers{router} Started Started container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 94ms (94ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.containers{router} Created Created container: router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.containers{router} Started Started container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:05:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.containers{router} Started Started container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary FinalizerInited Finalizer has been inited logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Updated Successfully update finalizer of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Updated Successfully update desiredPhase of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-0 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:21:30 | 
gr-self-healing | 2025-03-19 09:06:04 +0000 UTC Normal Pod gr-self-healing-mysql-0 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-0 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:05 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:05 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 176ms (176ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:05 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:05 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 127ms (127ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 109ms (109ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:08 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:39 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:06:25 Bootstrap starting... 2025/03/19 09:06:25 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. 
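
The chaos-pod-kill-primary events at 09:06:04 come from a Chaos Mesh PodChaos experiment that deletes the current primary pod outright. The exact manifest used by the test lives in the e2e-tests tree; this is an assumed reconstruction of its shape:

kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-pod-kill-primary
  namespace: kuttl-test-creative-mastodon
spec:
  action: pod-kill
  mode: one
  selector:
    pods:
      kuttl-test-creative-mastodon:
        - gr-self-healing-mysql-0
EOF
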
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 2025/03/19 09:06:26 Instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster 2025/03/19 09:06:26 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon] 2025/03/19 09:06:26 Running dba.getCluster('grselfhealing') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2025/03/19 09:06:26 Failed get cluster from peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon, stdout: stderr: Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError) at (command line):1:5 in dba.getCluster('grselfhealing') ^ 2025/03/19 09:06:26 Running dba.getCluster('grselfhealing') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/03/19 09:06:26 Connected to peer gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon 2025/03/19 09:06:27 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Topology: Member 0 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] 2025/03/19 09:06:27 Adding instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster 2025/03/19 09:06:27 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306... 
This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 ** Stage DROP DATA: \ ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:39 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:39 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:06:39 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 125ms (125ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary FinalizerInited Finalizer has been inited logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update finalizer of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Started Experiment has started logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update desiredPhase of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-2 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:42 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Killing Container xtrabackup definition changed, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing 
| 2025-03-19 09:07:42 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql definition changed, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:45 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Liveness probe failed: 2025/03/19 09:07:45 in primary partition: false 2025/03/19 09:07:45 liveness check failed: possible split brain! kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:49 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "gcr.io/google-containers/pause:latest" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:07:49 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "gcr.io/google-containers/pause:latest" in 343ms (343ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:08:41 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary TimeUp Time up according to the duration logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:08:41 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update desiredPhase of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:08:41 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Recovered Successfully recover chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-2 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:08:41 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:09:20 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:09:05 Bootstrap starting... 2025/03/19 09:09:05 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 
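
chaos-pod-failure-primary (09:07:42 above), by contrast, does not delete the pod: Chaos Mesh swaps the container image for gcr.io/google-containers/pause:latest until the experiment expires (Started 09:07:42, TimeUp 09:08:41, so roughly a one-minute window). A sketch under the same assumptions as the previous manifest, with the duration inferred from those timestamps:

kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-pod-failure-primary
  namespace: kuttl-test-creative-mastodon
spec:
  action: pod-failure
  mode: one
  duration: 60s
  selector:
    pods:
      kuttl-test-creative-mastodon:
        - gr-self-healing-mysql-2
EOF
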
2025/03/19 09:09:05 Instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster 2025/03/19 09:09:05 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon] 2025/03/19 09:09:05 Running dba.getCluster('grselfhealing') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/03/19 09:09:05 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon 2025/03/19 09:09:06 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] 2025/03/19 09:09:06 Adding instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster 2025/03/19 09:09:06 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 0% In Progress PAGE COPY 0% Not Started REDO COPY 0% Not Started** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... 
| * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary FinalizerInited Finalizer has been inited logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update finalizer of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Started Experiment has started logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update desiredPhase of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-0 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:22 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org gr-self-healing-mysql-0 Updated Successfully update ObservedGeneration and FailedMessage of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:35 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Liveness probe failed: 2025/03/19 09:10:35 in primary partition: false 2025/03/19 09:10:35 liveness check failed: possible split brain! kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:35 +0000 UTC Warning Pod gr-self-healing-router-9b57d7865-5lgck.spec.containers{router} Unhealthy Readiness probe failed: Read-write route is not healthy kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:36 +0000 UTC Warning Pod gr-self-healing-router-9b57d7865-5lssx.spec.containers{router} Unhealthy Readiness probe failed: Read-write route is not healthy kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:36 +0000 UTC Warning Pod gr-self-healing-router-9b57d7865-76rnp.spec.containers{router} Unhealthy Readiness probe failed: Read-write route is not healthy kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:45 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Liveness probe failed: 2025/03/19 09:10:45 in primary partition: false 2025/03/19 09:10:45 liveness check failed: possible split brain! kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:55 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Liveness probe failed: 2025/03/19 09:10:55 in primary partition: false 2025/03/19 09:10:55 liveness check failed: possible split brain! 
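
The "possible split brain!" lines are the mysql container's liveness probe refusing to pass while the member sits outside the primary partition during the network-loss experiment. The probe binary path appears verbatim in a later event (/opt/percona/healthcheck), so the check can be reproduced by hand; only the "readiness" subcommand is confirmed by the log, the "liveness" counterpart is an assumption based on the probe's wording:

kubectl -n kuttl-test-creative-mastodon exec gr-self-healing-mysql-0 -c mysql -- \
  /opt/percona/healthcheck readiness
# assumed counterpart invoked by the liveness probe:
kubectl -n kuttl-test-creative-mastodon exec gr-self-healing-mysql-0 -c mysql -- \
  /opt/percona/healthcheck liveness
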
kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:10:55 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Container mysql failed liveness probe, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:09 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: command "/opt/percona/healthcheck readiness" timed out kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary TimeUp Time up according to the duration logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update desiredPhase of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Recovered Successfully recover chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-0 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:22 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-primary Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:22 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org gr-self-healing-mysql-0 Updated Successfully update ObservedGeneration and FailedMessage of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:11:26 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:23 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash FinalizerInited Finalizer has been inited logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:23 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Updated Successfully update finalizer of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:23 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Updated Successfully update desiredPhase of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:23 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-5lssx logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:23 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lssx.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-0 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-1 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-mysql-2 logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal 
PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-5lgck logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal Pod gr-self-healing-mysql-0 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-0 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-5lgck.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-n6vjv to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-95l2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:24 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-9b57d7865 SuccessfulCreate Created pod: gr-self-healing-router-9b57d7865-n6vjv replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Applied Successfully apply chaos for kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-76rnp logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-kill-label-cluster-crash Updated Successfully update records of resource logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 153ms (153ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-76rnp.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 161ms (161ms including waiting) kubelet logger.go:42: 09:21:30 | 
gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-r2l9r to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-router-9b57d7865-tq4pz to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-9b57d7865 SuccessfulCreate Created pod: gr-self-healing-router-9b57d7865-r2l9r replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:25 +0000 UTC Normal ReplicaSet.apps gr-self-healing-router-9b57d7865 SuccessfulCreate Created pod: gr-self-healing-router-9b57d7865-tq4pz replicaset-controller logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 144ms (144ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 133ms (133ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:26 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:27 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet 
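
chaos-kill-label-cluster-crash (09:12:23 onward) differs from the earlier single-pod experiments: it selects every pod carrying the cluster's labels, mysql and router alike, and kills them all at once, which is why all three mysql pods and all three routers disappear together. A sketch, with the label key assumed rather than taken from the test:

kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-kill-label-cluster-crash
  namespace: kuttl-test-creative-mastodon
spec:
  action: pod-kill
  mode: all
  selector:
    namespaces:
      - kuttl-test-creative-mastodon
    labelSelectors:
      app.kubernetes.io/instance: gr-self-healing
EOF
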
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 153ms (153ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 135ms (135ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 104ms (104ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Created Created container: router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Started Started container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 145ms (145ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Created Created container: router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Started Started container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod 
gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 108ms (108ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Created Created container: router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:28 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Started Started container router kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:29 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 120ms (120ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:29 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 91ms (91ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:29 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 96ms (96ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:30 +0000 UTC Warning Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} BackOff Back-off restarting failed container router in pod gr-self-healing-router-9b57d7865-n6vjv_kuttl-test-creative-mastodon(812b9df4-bf21-4b57-bf91-6ae6e0e7ee9b) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:30 +0000 UTC Warning Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} BackOff Back-off restarting failed container router in pod gr-self-healing-router-9b57d7865-r2l9r_kuttl-test-creative-mastodon(a3f83823-cc1c-4bcb-a65e-11c98b618370) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:30 +0000 UTC Warning Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} BackOff Back-off restarting failed container router in pod gr-self-healing-router-9b57d7865-tq4pz_kuttl-test-creative-mastodon(f788b451-98d3-4d26-97b9-0a5779463adb) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:44 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 134ms (134ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:45 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 102ms (102ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:12:46 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 117ms (117ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:00 +0000 UTC Normal Pod gr-self-healing-mysql-1 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-1 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-95l2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:10 +0000 UTC Normal 
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:10 +0000 UTC Normal Pod gr-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-75f83107-6073-455a-a5b0-4766dee74cb3" attachdetach-controller
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:12 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:12 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 155ms (155ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:12 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:12 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 99ms (99ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 108ms (108ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:14 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:16 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 114ms (115ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:13:20 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router" in 94ms (94ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:14:28 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:13:32 Bootstrap starting...
2025/03/19 09:13:32 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The clearReadOnly option is deprecated and will be removed in a future release.
WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster...
This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
applierWorkerThreads will be set to the default value of 4.
The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is valid to be used in an InnoDB cluster.
The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is already ready to be used in an InnoDB cluster.
Successfully enabled parallel appliers.
2025/03/19 09:13:32 Instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster
2025/03/19 09:13:32 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon]
2025/03/19 09:13:32 Running dba.getCluster('grselfhealing')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2025/03/19 09:13:32 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon
2025/03/19 09:13:33 Cluster status:
ClusterName: grselfhealing
Status: OK_NO_TOLERANCE
StatusText: Cluster is NOT tolerant to any failures.
SSL: REQUIRED
Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Topology:
  Member 0
  Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
  State: ONLINE
  Errors: []
2025/03/19 09:13:33 Adding instance (gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster
2025/03/19 09:13:33 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead.
NOTE: The instance 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is running auto-rejoin process, which will be cancelled.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306...
This instance reports its own address as gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
** Stage DROP DATA: \ ** Stage DROP DATA: Completed
** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed
NOTE: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down...
* Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:14:28 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:14:28 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:14:28 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 117ms (117ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:02 +0000 UTC Normal Pod gr-self-healing-mysql-2 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-2 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:10 +0000 UTC Normal Pod gr-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1e318172-5767-4cf4-ae6f-b3f012b2117d" attachdetach-controller
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:11 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:11 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 146ms (146ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:11 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:11 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:13 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
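The "Startup probe failed" event above is part of the normal clone path rather than a defect: addInstance() with recoveryMethod 'clone' wipes the joiner's datadir and restarts mysqld, so the probe that was streaming the bootstrap output is cut off mid-run, the kubelet restarts the container, and the member then rejoins the group. The AdminAPI call the operator runs is visible verbatim in the probe output; reproduced by hand it would look roughly like this (a sketch only; the password is masked in the log and mysqlsh would prompt for it, and the deprecated waitRecovery option is omitted):

    mysqlsh --js --uri 'operator@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' \
      -e "dba.getCluster('grselfhealing').addInstance('operator@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon', {recoveryMethod: 'clone'})"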
"perconalab/percona-server-mysql-operator:main-psmysql" in 114ms (114ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:13 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:13 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:13 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:13 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 113ms (113ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:13 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:15:14 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:16:27 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:15:31 Bootstrap starting... 2025/03/19 09:15:31 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 applierWorkerThreads will be set to the default value of 4. The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is valid to be used in an InnoDB cluster. The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is already ready to be used in an InnoDB cluster. Successfully enabled parallel appliers. 2025/03/19 09:15:31 Instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster 2025/03/19 09:15:31 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon] 2025/03/19 09:15:31 Running dba.getCluster('grselfhealing') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/03/19 09:15:32 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon 2025/03/19 09:15:32 Cluster status: ClusterName: grselfhealing Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. 
SSL: REQUIRED Primary: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Topology: Member 0 Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] Member 1 Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 State: ONLINE Errors: [] 2025/03/19 09:15:32 Adding instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster 2025/03/19 09:15:32 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is running auto-rejoin process, which will be cancelled. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306... This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 ** Stage DROP DATA: \ ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... 
- kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:16:27 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:16:27 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:16:27 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 101ms (101ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:30 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:30 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:30 +0000 UTC Normal Pod gr-self-healing-mysql-0 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-0 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-fhs2 default-scheduler logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:30 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:30 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:31 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:31 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 131ms (131ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:31 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:31 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 111ms (111ms including waiting) kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet 
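At 09:17:30 both gr-self-healing-mysql-0 (the primary at the time) and gr-self-healing-mysql-2 are stopped at once, which appears to correspond to a chaos step of this test rather than an operator decision. After such an event, group membership can be verified from any surviving member; a hedged check (the root credential variable is illustrative, not part of the harness):

    kubectl -n kuttl-test-creative-mastodon exec gr-self-healing-mysql-1 -c mysql -- \
      mysql -uroot -p"$ROOT_PASSWORD" -e "SELECT MEMBER_HOST, MEMBER_STATE, MEMBER_ROLE FROM performance_schema.replication_group_members;"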
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 109ms (109ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:33 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:17:34 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/03/19 09:17:34 readiness check failed: connect to db: ping DB: dial tcp 10.188.98.27:33062: connect: connection refused kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:05 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:17:51 Bootstrap starting...
2025/03/19 09:17:51 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The clearReadOnly option is deprecated and will be removed in a future release.
WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster...
This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
applierWorkerThreads will be set to the default value of 4.
The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is valid to be used in an InnoDB cluster.
The instance 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is already ready to be used in an InnoDB cluster.
Successfully enabled parallel appliers.
2025/03/19 09:17:51 Instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster
2025/03/19 09:17:51 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon]
2025/03/19 09:17:51 Running dba.getCluster('grselfhealing')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError)
 at (command line):1:5
 in dba.getCluster('grselfhealing')
    ^
2025/03/19 09:17:52 Failed get cluster from peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon, stdout: stderr: Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
Dba.getCluster: This function is not available through a session to a standalone instance (metadata exists, instance does not belong to that metadata, and GR is not active) (RuntimeError)
 at (command line):1:5
 in dba.getCluster('grselfhealing')
    ^
2025/03/19 09:17:52 Running dba.getCluster('grselfhealing')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2025/03/19 09:17:52 Connected to peer gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon
2025/03/19 09:17:53 Cluster status:
ClusterName: grselfhealing
Status: OK_NO_TOLERANCE_PARTIAL
StatusText: Cluster is NOT tolerant to any failures. 1 member is not active.
SSL: REQUIRED
Primary: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Topology:
  Member 0
  Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
  State: ONLINE
  Errors: []
  Member 1
  Address: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
  State: (MISSING)
  Errors: []
2025/03/19 09:17:53 Adding instance (gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster
2025/03/19 09:17:53 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306...
This instance reports its own address as gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
ERROR: Unable to enable clone on the instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306': MySQL Error 2005: Could not open connection to 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306': Unknown MySQL server host 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon' (-2)
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
** Stage DROP DATA: Completed
** Clone Transfer FILE COPY 0% In Progress PAGE COPY 0% Not Started REDO COPY 0% Not Started
** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed
NOTE: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down...
* Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:05 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:05 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:05 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 123ms (123ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:36 +0000 UTC Normal Pod gr-self-healing-mysql-2 Binding Scheduled Successfully assigned kuttl-test-creative-mastodon/gr-self-healing-mysql-2 to gke-jen-ps-869-ff26afb0--default-pool-78f4a38b-dngr default-scheduler
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:44 +0000 UTC Normal Pod gr-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1e318172-5767-4cf4-ae6f-b3f012b2117d" attachdetach-controller
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:45 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:45 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-869-ff26afb0" in 164ms (164ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:45 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:45 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 100ms (100ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
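The Dba.getCluster RuntimeError above ("metadata exists, instance does not belong to that metadata, and GR is not active") is what MySQL Shell reports when it connects to a freshly restarted member that still has cluster metadata on disk but no running group. The bootstrap simply falls through to the next peer, gr-self-healing-mysql-1, which still has an active group, so no manual intervention is needed here. Had every member been in that state (a complete outage), the documented AdminAPI recovery would be roughly the following (a sketch; the password is masked in the log and would be prompted for):

    mysqlsh --js --uri 'operator@gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' \
      -e "dba.rebootClusterFromCompleteOutage('grselfhealing')"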
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 125ms (125ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:18:47 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:19:59 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/19 09:19:05 Bootstrap starting...
2025/03/19 09:19:05 Running dba.configureLocalInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'clearReadOnly': true})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The clearReadOnly option is deprecated and will be removed in a future release.
WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster...
This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
applierWorkerThreads will be set to the default value of 4.
The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is valid to be used in an InnoDB cluster.
The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is already ready to be used in an InnoDB cluster.
Successfully enabled parallel appliers.
2025/03/19 09:19:05 Instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) configured to join to the InnoDB cluster
2025/03/19 09:19:05 peers: [gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon]
2025/03/19 09:19:05 Running dba.getCluster('grselfhealing')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2025/03/19 09:19:06 Connected to peer gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon
2025/03/19 09:19:06 Cluster status:
ClusterName: grselfhealing
Status: OK_NO_TOLERANCE
StatusText: Cluster is NOT tolerant to any failures.
SSL: REQUIRED
Primary: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Topology:
  Member 0
  Address: gr-self-healing-mysql-0.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
  State: ONLINE
  Errors: []
  Member 1
  Address: gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
  State: ONLINE
  Errors: []
2025/03/19 09:19:06 Adding instance (gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon) to InnoDB cluster
2025/03/19 09:19:06 Running dba.getCluster('grselfhealing').addInstance('operator:*****@gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon', {'recoveryMethod': 'clone', 'waitRecovery': 3})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead.
NOTE: The instance 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306' is running auto-rejoin process, which will be cancelled.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306...
This instance reports its own address as gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is being cloned from gr-self-healing-mysql-1.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306
** Stage DROP DATA: Completed
** Clone Transfer FILE COPY 0% In Progress PAGE COPY 0% Not Started REDO COPY 0% Not Started
** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed
NOTE: gr-self-healing-mysql-2.gr-self-healing-mysql.kuttl-test-creative-mastodon:3306 is shutting down...
* Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:19:59 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:19:59 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:00 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 110ms (110ms including waiting) kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:39 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-625fs.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:39 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-8glbz.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:39 +0000 UTC Normal Pod chaos-controller-manager-5f8b4885cf-9sg5n.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:40 +0000 UTC Normal Pod chaos-daemon-5bqbc.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:40 +0000 UTC Normal Pod chaos-daemon-n6ts5.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:20:40 +0000 UTC Normal Pod chaos-daemon-xdchq.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-n6vjv.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-r2l9r.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:23 +0000 UTC Normal Pod gr-self-healing-router-9b57d7865-tq4pz.spec.containers{router} Killing Stopping container router kubelet
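Everything from 09:20:39 onward is teardown rather than failure: the chaos-mesh controllers and daemons are removed first, then at 09:21:23 every cluster container is stopped as the namespace is torn down. The readiness warnings that follow are the probes racing that shutdown. Watching a teardown like this live is a plain:

    kubectl -n kuttl-test-creative-mastodon get pods -w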
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:26 +0000 UTC Warning Pod gr-self-healing-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: [mysql] 2025/03/19 09:21:26 packets.go:46 read tcp 10.188.96.35:36994->10.188.96.35:33062: read: connection reset by peer
2025/03/19 09:21:26 readiness check failed: connect to db: ping DB: invalid connection kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:26 +0000 UTC Warning Pod gr-self-healing-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: [mysql] 2025/03/19 09:21:26 packets.go:46 read tcp 10.188.98.28:42650->10.188.98.28:33062: read: connection reset by peer
2025/03/19 09:21:26 readiness check failed: connect to db: ping DB: invalid connection kubelet
logger.go:42: 09:21:30 | gr-self-healing | 2025-03-19 09:21:27 +0000 UTC Warning Pod gr-self-healing-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/03/19 09:21:27 readiness check failed: Member state: OFFLINE kubelet
logger.go:42: 09:21:30 | gr-self-healing | Deleting namespace: kuttl-test-creative-mastodon
=== NAME  kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (1289.05s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/gr-self-healing (1288.62s)
PASS
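Every failure recorded above resolved within the harness limits, and the run passed. To re-run only this test against the same kubeconfig, kuttl's CLI accepts a test filter and a per-step timeout; assuming the flags of current kuttl releases and the repository layout shown earlier in this log, the invocation would be roughly:

    kubectl kuttl test e2e-tests/tests --test gr-self-healing --timeout 180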