=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://35.225.180.226
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/users
=== PAUSE kuttl/harness/users
=== CONT kuttl/harness/users
logger.go:42: 03:27:15 | users | Creating namespace: kuttl-test-workable-clam
logger.go:42: 03:27:15 | users/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 03:27:15 | users/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 03:27:15 | users/0-deploy-operator | + source ../../functions
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ realpath ../../..
logger.go:42: 03:27:15 | users/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:27:15 | users/0-deploy-operator | ++++ pwd
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/tests/users
logger.go:42: 03:27:15 | users/0-deploy-operator | ++ test_name=users
logger.go:42: 03:27:15 | users/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/vars.sh
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 03:27:15 | users/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export GIT_BRANCH=PR-746
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ GIT_BRANCH=PR-746
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export VERSION=PR-746-e3b0b614
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ VERSION=PR-746-e3b0b614
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 03:27:15 | users/0-deploy-operator | ++++ which gdate
logger.go:42: 03:27:15 | users/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-746/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 03:27:15 | users/0-deploy-operator | ++++ which date
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ command -v oc
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 03:27:15 | users/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 03:27:16 | users/0-deploy-operator | + init_temp_dir
logger.go:42: 03:27:16 | users/0-deploy-operator | + rm -rf /tmp/kuttl/ps/users
logger.go:42: 03:27:16 | users/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/users
logger.go:42: 03:27:16 | users/0-deploy-operator | + deploy_operator
logger.go:42: 03:27:16 | users/0-deploy-operator | + destroy_operator
logger.go:42: 03:27:16 | users/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 03:27:16 | users/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 03:27:16 | users/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 03:27:16 | users/0-deploy-operator | + true
logger.go:42: 03:27:16 | users/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 03:27:16 | users/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 03:27:16 | users/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 03:27:17 | users/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 03:27:17 | users/0-deploy-operator | + true
logger.go:42: 03:27:17 | users/0-deploy-operator | + [[ -n ps-operator ]]
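The NotFound errors and "Immediate deletion" warnings above are expected on a clean cluster: destroy_operator force-deletes any leftover operator deployment and namespace and swallows the failures so the step stays idempotent. A minimal sketch of that pattern, assuming OPERATOR_NS=ps-operator as in this run (the real helper lives in e2e-tests/functions and may differ in detail):

    destroy_operator() {
        # --force --grace-period=0 is what triggers the "Immediate deletion" warning
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0 || true    # '|| true' produces the '+ true' lines on NotFound
        if [[ -n ${OPERATOR_NS} ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0 || true
        fi
    }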
logger.go:42: 03:27:17 | users/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 03:27:17 | users/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 03:27:17 | users/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 03:27:17 | users/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 03:27:17 | users/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 03:27:18 | users/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 03:27:18 | users/0-deploy-operator | namespace/ps-operator created
logger.go:42: 03:27:18 | users/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy/crd.yaml
logger.go:42: 03:27:19 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 03:27:19 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 03:27:20 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 03:27:20 | users/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 03:27:20 | users/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy/cw-rbac.yaml
logger.go:42: 03:27:21 | users/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 03:27:21 | users/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 03:27:22 | users/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 03:27:22 | users/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 03:27:22 | users/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 03:27:22 | users/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 03:27:22 | users/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 03:27:22 | users/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 03:27:22 | users/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:27:22 | users/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-746-e3b0b614"' /mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy/cw-operator.yaml
logger.go:42: 03:27:23 | users/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 03:27:24 | users/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
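Reading the interleaved xtrace back together, the deploy helper never edits deploy/cw-operator.yaml on disk: it rewrites the manifest in-memory with yq (the PR image for the manager container, plus the DISABLE_TELEMETRY and LOG_LEVEL env values) and pipes the result to kubectl. Roughly, with pipeline order reconstructed from the trace:

    yq eval "$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" "${DEPLOY_DIR}/cw-operator.yaml" \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
        | kubectl -n "${OPERATOR_NS}" apply -f -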
logger.go:42: 03:27:24 | users/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 03:27:24 | users/0-deploy-operator | + kubectl -n kuttl-test-workable-clam apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf/secrets.yaml
logger.go:42: 03:27:24 | users/0-deploy-operator | secret/test-secrets created
logger.go:42: 03:27:24 | users/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 03:27:24 | users/0-deploy-operator | + kubectl -n kuttl-test-workable-clam apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 03:27:25 | users/0-deploy-operator | secret/test-ssl created
logger.go:42: 03:27:25 | users/0-deploy-operator | + deploy_client
logger.go:42: 03:27:25 | users/0-deploy-operator | + kubectl -n kuttl-test-workable-clam apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf/client.yaml
logger.go:42: 03:27:26 | users/0-deploy-operator | pod/mysql-client created
logger.go:42: 03:27:28 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 03:27:28 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 03:27:28 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 03:27:29 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 03:27:29 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 03:27:30 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 03:27:31 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 03:27:31 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 03:27:32 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 03:27:33 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 03:27:33 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 03:27:33 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 03:27:35 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 03:27:35 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 03:27:35 | users/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 03:27:35 | users/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 03:27:35 | users/0-deploy-operator | percona-server-mysql-operator ps-operator  1
logger.go:42: 03:27:35 | users/0-deploy-operator | ASSERT PASS
logger.go:42: 03:27:35 | users/0-deploy-operator | test step completed 0-deploy-operator
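The four ASSERT FAIL lines are just a readiness poll, not an error: the step re-runs kubectl assert (the kubectl-assert krew plugin; note /home/ec2-user/.krew/bin on the PATH above) every couple of seconds until the operator deployment reports a ready replica or the step times out. A hedged sketch of an equivalent poll without the plugin:

    # poll until the operator deployment has exactly one ready replica
    until [[ "$(kubectl -n "${OPERATOR_NS:-$NAMESPACE}" get deployment percona-server-mysql-operator \
            -o jsonpath='{.status.readyReplicas}' 2>/dev/null)" == "1" ]]; do
        sleep 2    # the trace shows roughly 2s between attempts
    done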
logger.go:42: 03:27:35 | users/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 03:27:35 | users/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '.spec.mysql.clusterType="async"' - \
| yq eval '.spec.mysql.size=3' - \
| yq eval '.spec.proxy.haproxy.enabled=true' - \
| yq eval '.spec.proxy.haproxy.size=3' - \
| yq eval '.spec.orchestrator.enabled=true' - \
| yq eval '.spec.orchestrator.size=3' - \
| kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 03:27:35 | users/1-create-cluster | + source ../../functions
logger.go:42: 03:27:35 | users/1-create-cluster | +++ realpath ../../..
logger.go:42: 03:27:35 | users/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:27:35 | users/1-create-cluster | ++++ pwd
logger.go:42: 03:27:35 | users/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/tests/users
logger.go:42: 03:27:35 | users/1-create-cluster | ++ test_name=users
logger.go:42: 03:27:35 | users/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/vars.sh
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:27:35 | users/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy
logger.go:42: 03:27:35 | users/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests
logger.go:42: 03:27:35 | users/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf
logger.go:42: 03:27:35 | users/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 03:27:35 | users/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 03:27:35 | users/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export GIT_BRANCH=PR-746
logger.go:42: 03:27:35 | users/1-create-cluster | +++ GIT_BRANCH=PR-746
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export VERSION=PR-746-e3b0b614
logger.go:42: 03:27:35 | users/1-create-cluster | +++ VERSION=PR-746-e3b0b614
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 03:27:35 | users/1-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 03:27:35 | users/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 03:27:35 | users/1-create-cluster | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 03:27:35 | users/1-create-cluster | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 03:27:35 | users/1-create-cluster | ++++ which gdate
logger.go:42: 03:27:35 | users/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-746/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 03:27:35 | users/1-create-cluster | ++++ which date
logger.go:42: 03:27:35 | users/1-create-cluster | +++ date=/usr/bin/date
logger.go:42: 03:27:35 | users/1-create-cluster | +++ command -v oc
logger.go:42: 03:27:35 | users/1-create-cluster | +++ kubectl get nodes
logger.go:42: 03:27:35 | users/1-create-cluster | +++ grep '^minikube'
logger.go:42: 03:27:36 | users/1-create-cluster | + get_cr
logger.go:42: 03:27:36 | users/1-create-cluster | + local name_suffix=
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 03:27:36 | users/1-create-cluster | + kubectl -n kuttl-test-workable-clam apply -f -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-746-e3b0b614"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + '[' -n '' ']'
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.metadata.name="%s"' users
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.metadata.name="users"' /mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy/cr.yaml
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 03:27:36 | users/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:27:36 | users/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 03:27:37 | users/1-create-cluster | perconaservermysql.ps.percona.com/users created
logger.go:42: 03:30:46 | users/1-create-cluster | test step completed 1-create-cluster
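The yq lines above look shuffled because every stage of the get_cr pipeline runs concurrently and xtrace interleaves their output. Untangled, get_cr renders deploy/cr.yaml through a chain of in-memory edits before the step-specific overrides are piped in; a sketch reconstructed from the trace (the real function in e2e-tests/functions sets a few more images than shown here):

    get_cr() {
        yq eval "$(printf '.metadata.name="%s"' "${test_name}")" "${DEPLOY_DIR}/cr.yaml" \
            | yq eval '.spec.secretsName="test-secrets"' - \
            | yq eval '.spec.sslSecretName="test-ssl"' - \
            | yq eval '.spec.upgradeOptions.apply="disabled"' - \
            | yq eval "$(printf '.spec.initImage="%s"' "${IMAGE}")" - \
            | yq eval "$(printf '.spec.mysql.image="%s"' "${IMAGE_MYSQL}")" -
            # ...plus the haproxy/router/orchestrator/toolkit/backup/pmm image overrides seen above
    }

The step script then layers the test's own topology on top (async cluster type, 3 MySQL pods, 3 HAProxy pods, 3 orchestrator pods) and applies the result to the test namespace.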
logger.go:42: 03:30:46 | users/2-check-users | starting test step 2-check-users
logger.go:42: 03:30:46 | users/2-check-users | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
users=($(get_mysql_users "${mysql_args}"))
args=''
for user in "${users[@]}"; do
  host="%"
  case $user in
    heartbeat | xtrabackup)
      host="localhost"
      ;;
  esac
  query="SHOW GRANTS FOR '${user}'@'${host}';"
  run_mysql "${query}" "${mysql_args}" \
    | sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//" \
    >"${TEMP_DIR}/${user}.sql"
  args="${args} --from-file=${user}=${TEMP_DIR}/${user}.sql"
done
kubectl create configmap -n "${NAMESPACE}" 02-check-users $args
kubectl get configmap -n "${NAMESPACE}" 02-check-users -o yaml]
logger.go:42: 03:30:46 | users/2-check-users | + source ../../functions
logger.go:42: 03:30:46 | users/2-check-users | +++ realpath ../../..
logger.go:42: 03:30:46 | users/2-check-users | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:30:46 | users/2-check-users | ++++ pwd
logger.go:42: 03:30:46 | users/2-check-users | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/tests/users
logger.go:42: 03:30:46 | users/2-check-users | ++ test_name=users
logger.go:42: 03:30:46 | users/2-check-users | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/vars.sh
logger.go:42: 03:30:46 | users/2-check-users | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:30:46 | users/2-check-users | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746
logger.go:42: 03:30:46 | users/2-check-users | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy
logger.go:42: 03:30:46 | users/2-check-users | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy
logger.go:42: 03:30:46 | users/2-check-users | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests
logger.go:42: 03:30:46 | users/2-check-users | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests
logger.go:42: 03:30:46 | users/2-check-users | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf
logger.go:42: 03:30:46 | users/2-check-users | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf
logger.go:42: 03:30:46 | users/2-check-users | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 03:30:46 | users/2-check-users | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 03:30:46 | users/2-check-users | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 03:30:46 | users/2-check-users | +++ export GIT_BRANCH=PR-746
logger.go:42: 03:30:46 | users/2-check-users | +++ GIT_BRANCH=PR-746
logger.go:42: 03:30:46 | users/2-check-users | +++ export VERSION=PR-746-e3b0b614
logger.go:42: 03:30:46 | users/2-check-users | +++ VERSION=PR-746-e3b0b614
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 03:30:46 | users/2-check-users | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 03:30:46 | users/2-check-users | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 03:30:46 | users/2-check-users | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 03:30:46 | users/2-check-users | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 03:30:46 | users/2-check-users | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 03:30:46 | users/2-check-users | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 03:30:46 | users/2-check-users | ++++ which gdate
logger.go:42: 03:30:46 | users/2-check-users | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-746/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 03:30:46 | users/2-check-users | ++++ which date
logger.go:42: 03:30:46 | users/2-check-users | +++ date=/usr/bin/date
logger.go:42: 03:30:46 | users/2-check-users | +++ command -v oc
logger.go:42: 03:30:46 | users/2-check-users | +++ kubectl get nodes
logger.go:42: 03:30:46 | users/2-check-users | +++ grep '^minikube'
logger.go:42: 03:30:47 | users/2-check-users | +++ get_cluster_name
logger.go:42: 03:30:47 | users/2-check-users | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 03:30:47 | users/2-check-users | ++ get_haproxy_svc users
logger.go:42: 03:30:47 | users/2-check-users | ++ local cluster=users
logger.go:42: 03:30:47 | users/2-check-users | ++ echo users-haproxy
logger.go:42: 03:30:47 | users/2-check-users | + mysql_args='-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:47 | users/2-check-users | + users=($(get_mysql_users "${mysql_args}"))
logger.go:42: 03:30:47 | users/2-check-users | ++ get_mysql_users '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:47 | users/2-check-users | ++ local 'args=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:47 | users/2-check-users | ++ run_mysql 'SELECT user FROM mysql.user' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:47 | users/2-check-users | ++ grep -vE 'mysql|root'
logger.go:42: 03:30:47 | users/2-check-users | ++ local 'command=SELECT user FROM mysql.user'
logger.go:42: 03:30:47 | users/2-check-users | ++ local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:47 | users/2-check-users | ++ local pod=
logger.go:42: 03:30:47 | users/2-check-users | +++ get_client_pod
logger.go:42: 03:30:47 | users/2-check-users | +++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:30:48 | users/2-check-users | ++ client_pod=mysql-client
logger.go:42: 03:30:48 | users/2-check-users | ++ wait_pod mysql-client
logger.go:42: 03:30:48 | users/2-check-users | ++ local pod=mysql-client
logger.go:42: 03:30:48 | users/2-check-users | ++ set +o xtrace
logger.go:42: 03:30:48 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:30:48 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT user FROM mysql.user" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:48 | users/2-check-users | ++ sed -e 's/mysql: //'
logger.go:42: 03:30:48 | users/2-check-users | ++ grep -v 'Using a password on the command line interface can be insecure.'
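Every database query in this test funnels through the run_mysql helper, whose shape can be read straight off the trace: it execs the mysql CLI inside the mysql-client pod deployed in step 0 and strips the CLI's password warning from the output. A paraphrase of that trace, not the exact helper from e2e-tests/functions:

    run_mysql() {
        local command="$1"
        local uri="$2"
        local pod="${3:-$(get_client_pod)}"    # defaults to the mysql-client pod
        wait_pod "${pod}"
        kubectl -n "${NAMESPACE}" exec "${pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }

get_mysql_users is then just run_mysql 'SELECT user FROM mysql.user' with a grep -vE 'mysql|root' on top, which yields the six operator-managed accounts iterated below: monitor, operator, orchestrator, replication, heartbeat, and xtrabackup.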
logger.go:42: 03:30:50 | users/2-check-users | + args=
logger.go:42: 03:30:50 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 03:30:50 | users/2-check-users | + host=%
logger.go:42: 03:30:50 | users/2-check-users | + case $user in
logger.go:42: 03:30:50 | users/2-check-users | + query='SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';'
logger.go:42: 03:30:50 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 03:30:50 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:50 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';'
logger.go:42: 03:30:50 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:50 | users/2-check-users | + local pod=
logger.go:42: 03:30:50 | users/2-check-users | ++ get_client_pod
logger.go:42: 03:30:50 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:30:50 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 03:30:50 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 03:30:50 | users/2-check-users | + local pod=mysql-client
logger.go:42: 03:30:50 | users/2-check-users | + set +o xtrace
logger.go:42: 03:30:50 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:30:50 | users/2-check-users | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:50 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 03:30:50 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 03:30:52 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql'
logger.go:42: 03:30:52 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 03:30:52 | users/2-check-users | + host=%
logger.go:42: 03:30:52 | users/2-check-users | + case $user in
logger.go:42: 03:30:52 | users/2-check-users | + query='SHOW GRANTS FOR '\''operator'\''@'\''%'\'';'
logger.go:42: 03:30:52 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''operator'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:52 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''operator'\''@'\''%'\'';'
logger.go:42: 03:30:52 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 03:30:52 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:52 | users/2-check-users | + local pod=
logger.go:42: 03:30:52 | users/2-check-users | ++ get_client_pod
logger.go:42: 03:30:52 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:30:52 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 03:30:52 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 03:30:52 | users/2-check-users | + local pod=mysql-client
logger.go:42: 03:30:52 | users/2-check-users | + set +o xtrace
logger.go:42: 03:30:53 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:30:53 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 03:30:53 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 03:30:53 | users/2-check-users | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''operator'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:54 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql'
logger.go:42: 03:30:54 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 03:30:54 | users/2-check-users | + host=%
logger.go:42: 03:30:54 | users/2-check-users | + case $user in
logger.go:42: 03:30:54 | users/2-check-users | + query='SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';'
logger.go:42: 03:30:54 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:54 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 03:30:54 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';'
logger.go:42: 03:30:54 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:54 | users/2-check-users | + local pod=
logger.go:42: 03:30:54 | users/2-check-users | ++ get_client_pod
logger.go:42: 03:30:54 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:30:55 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 03:30:55 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 03:30:55 | users/2-check-users | + local pod=mysql-client
logger.go:42: 03:30:55 | users/2-check-users | + set +o xtrace
logger.go:42: 03:30:55 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:30:55 | users/2-check-users | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:55 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 03:30:55 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 03:30:57 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql'
logger.go:42: 03:30:57 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 03:30:57 | users/2-check-users | + host=%
logger.go:42: 03:30:57 | users/2-check-users | + case $user in
logger.go:42: 03:30:57 | users/2-check-users | + query='SHOW GRANTS FOR '\''replication'\''@'\''%'\'';'
logger.go:42: 03:30:57 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''replication'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:57 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''replication'\''@'\''%'\'';'
logger.go:42: 03:30:57 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:57 | users/2-check-users | + local pod=
logger.go:42: 03:30:57 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 03:30:57 | users/2-check-users | ++ get_client_pod
logger.go:42: 03:30:57 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:30:57 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 03:30:57 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 03:30:57 | users/2-check-users | + local pod=mysql-client
logger.go:42: 03:30:57 | users/2-check-users | + set +o xtrace
logger.go:42: 03:30:58 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:30:58 | users/2-check-users | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''replication'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:58 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 03:30:58 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 03:30:59 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql'
logger.go:42: 03:30:59 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 03:30:59 | users/2-check-users | + host=%
logger.go:42: 03:30:59 | users/2-check-users | + case $user in
logger.go:42: 03:30:59 | users/2-check-users | + host=localhost
logger.go:42: 03:30:59 | users/2-check-users | + query='SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';'
logger.go:42: 03:30:59 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 03:30:59 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:59 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';'
logger.go:42: 03:30:59 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:30:59 | users/2-check-users | + local pod=
logger.go:42: 03:30:59 | users/2-check-users | ++ get_client_pod
logger.go:42: 03:30:59 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:31:00 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 03:31:00 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 03:31:00 | users/2-check-users | + local pod=mysql-client
logger.go:42: 03:31:00 | users/2-check-users | + set +o xtrace
logger.go:42: 03:31:00 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:31:00 | users/2-check-users | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:31:00 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 03:31:00 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
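The host flips to localhost above because the operator creates heartbeat and xtrabackup as local-only accounts; every other system user is defined at '%'. The branch in the step script reduces to:

    case $user in
        heartbeat | xtrabackup) host="localhost" ;;    # local-only accounts
        *)                      host="%" ;;            # everything else is checked at any-host
    esac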
logger.go:42: 03:31:02 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql'
logger.go:42: 03:31:02 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 03:31:02 | users/2-check-users | + host=%
logger.go:42: 03:31:02 | users/2-check-users | + case $user in
logger.go:42: 03:31:02 | users/2-check-users | + host=localhost
logger.go:42: 03:31:02 | users/2-check-users | + query='SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';'
logger.go:42: 03:31:02 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 03:31:02 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 03:31:02 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';'
logger.go:42: 03:31:02 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 03:31:02 | users/2-check-users | + local pod=
logger.go:42: 03:31:02 | users/2-check-users | ++ get_client_pod
logger.go:42: 03:31:02 | users/2-check-users | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 03:31:02 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 03:31:02 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 03:31:02 | users/2-check-users | + local pod=mysql-client
logger.go:42: 03:31:02 | users/2-check-users | + set +o xtrace
logger.go:42: 03:31:02 | users/2-check-users | mysql-clienttrue
logger.go:42: 03:31:02 | users/2-check-users | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 03:31:02 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 03:31:02 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 03:31:04 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql --from-file=xtrabackup=/tmp/kuttl/ps/users/xtrabackup.sql'
logger.go:42: 03:31:04 | users/2-check-users | + kubectl create configmap -n kuttl-test-workable-clam 02-check-users --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql --from-file=xtrabackup=/tmp/kuttl/ps/users/xtrabackup.sql
logger.go:42: 03:31:04 | users/2-check-users | configmap/02-check-users created
logger.go:42: 03:31:04 | users/2-check-users | + kubectl get configmap -n kuttl-test-workable-clam 02-check-users -o yaml
logger.go:42: 03:31:05 | users/2-check-users | apiVersion: v1
logger.go:42: 03:31:05 | users/2-check-users | data:
logger.go:42: 03:31:05 | users/2-check-users |   heartbeat: |
logger.go:42: 03:31:05 | users/2-check-users |     GRANT REPLICATION CLIENT ON *.* TO `heartbeat`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SYSTEM_USER ON *.* TO `heartbeat`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ON `sys_operator`.`heartbeat` TO `heartbeat`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |   monitor: |
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT, RELOAD, PROCESS, SUPER, REPLICATION CLIENT ON *.* TO `monitor`@`%`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT BACKUP_ADMIN,SERVICE_CONNECTION_ADMIN,SYSTEM_USER ON *.* TO `monitor`@`%`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `performance_schema`.* TO `monitor`@`%`
logger.go:42: 03:31:05 | users/2-check-users |   operator: |
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `operator`@`%` WITH GRANT OPTION
logger.go:42: 03:31:05 | users/2-check-users |     GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ABORT_EXEMPT,AUDIT_ADMIN,AUTHENTICATION_POLICY_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,ENCRYPTION_KEY_ADMIN,FIREWALL_EXEMPT,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PASSWORDLESS_USER_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SENSITIVE_VARIABLES_OBSERVER,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,TELEMETRY_LOG_ADMIN,XA_RECOVER_ADMIN ON *.* TO `operator`@`%` WITH GRANT OPTION
logger.go:42: 03:31:05 | users/2-check-users |   orchestrator: |
logger.go:42: 03:31:05 | users/2-check-users |     GRANT RELOAD, PROCESS, SUPER, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO `orchestrator`@`%`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SYSTEM_USER ON *.* TO `orchestrator`@`%`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `sys_operator`.* TO `orchestrator`@`%`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `mysql`.`slave_master_info` TO `orchestrator`@`%`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`replication_group_members` TO `orchestrator`@`%`
logger.go:42: 03:31:05 | users/2-check-users |   replication: |
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT, RELOAD, SHUTDOWN, PROCESS, FILE, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE USER ON *.* TO `replication`@`%` WITH GRANT OPTION
logger.go:42: 03:31:05 | users/2-check-users |     GRANT BACKUP_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,ROLE_ADMIN,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN ON *.* TO `replication`@`%` WITH GRANT OPTION
logger.go:42: 03:31:05 | users/2-check-users |     GRANT INSERT, UPDATE, DELETE ON `mysql`.* TO `replication`@`%` WITH GRANT OPTION
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`threads` TO `replication`@`%`
logger.go:42: 03:31:05 | users/2-check-users |   xtrabackup: |
logger.go:42: 03:31:05 | users/2-check-users |     GRANT RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT ON *.* TO `xtrabackup`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT BACKUP_ADMIN,GROUP_REPLICATION_ADMIN,REPLICATION_SLAVE_ADMIN,SYSTEM_USER ON *.* TO `xtrabackup`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`keyring_component_status` TO `xtrabackup`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`log_status` TO `xtrabackup`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`replication_group_members` TO `xtrabackup`@`localhost`
logger.go:42: 03:31:05 | users/2-check-users | kind: ConfigMap
logger.go:42: 03:31:05 | users/2-check-users | metadata:
logger.go:42: 03:31:05 | users/2-check-users |   creationTimestamp: "2024-09-18T03:31:04Z"
logger.go:42: 03:31:05 | users/2-check-users |   name: 02-check-users
logger.go:42: 03:31:05 | users/2-check-users |   namespace: kuttl-test-workable-clam
logger.go:42: 03:31:05 | users/2-check-users |   resourceVersion: "43034"
logger.go:42: 03:31:05 | users/2-check-users |   uid: 6e3e53ed-cf78-4a9c-9c41-f8b20e2b7a5b
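The grants in the ConfigMap above are already normalized: the sed filter in the step script strips pod IPs and *.internal hostnames from SHOW GRANTS output, so the stored data stays stable across clusters and can be compared against the step's expected fixtures. For example, with an illustrative input that is not from this run:

    echo "GRANT SELECT ON *.* TO 'monitor'@'10.12.0.5'" \
        | sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//"
    # -> GRANT SELECT ON *.* TO 'monitor'@

The first alternation removes any single-quoted literal beginning with 10. or 192. (private pod CIDRs), the second removes quoted *.internal node hostnames.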
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.
Detected at:
> goroutine 21 [running]:
> runtime/debug.Stack()
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc00026bc00, {0x184a055, 0x14})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc00026bc00}, 0x0}, {0x184a055?, 0xc000789f80?})
> /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc000424a10, {0x1accd90, 0xc00026a040}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc0000cf448?, {0x0, 0xc000424a10, {0x1accd90, 0xc00026a040}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc0000cf448, {0x0, 0xc000424a10, {0x1accd90, 0xc00026a040}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc0002ff208, 0xa2?)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0004ec750, 0xc000113520, {0xc000465b30, 0x18})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0004ec750, 0xc000113520, {0xc000465b30, 0x18})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc00010f680, 0xc000113520, 0xc000559dd0)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc000113520)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc000113520, 0xc00054a6d8)
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 20
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 03:31:05 | users/2-check-users | test step completed 2-check-users
logger.go:42: 03:31:05 | users/3-update-passwords | starting test step 3-update-passwords
logger.go:42: 03:31:06 | users/3-update-passwords | Secret:kuttl-test-workable-clam/test-secrets updated
logger.go:42: 03:31:14 | users/3-update-passwords | test step completed 3-update-passwords
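The controller-runtime warning above comes from kuttl's own Kubernetes client, not from the operator under test, and is harmless here. Step 3 itself is fully declarative: kuttl applies an updated test-secrets Secret (the single "updated" line) and the operator is expected to reconcile the new credentials into MySQL, which step 4 then verifies by logging in with the *_password_updated passwords. An equivalent manual rotation might look like the sketch below; the Secret key names are assumed to mirror the user names, and the real 03-update-passwords step file defines the exact contents:

    kubectl -n "${NAMESPACE}" patch secret test-secrets --type merge -p '{
        "stringData": {
            "root": "root_password_updated",
            "monitor": "monitor_password_updated"
        }
    }'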
logger.go:42: 03:31:14 | users/4-check-cluster | starting test step 4-check-cluster
logger.go:42: 03:31:14 | users/4-check-cluster | running command: [sh -c set -o pipefail
set -o errexit
set -o xtrace
source ../../functions
sleep 30 # wait for cluster status to change to initializing
wait_cluster_consistency_async "${test_name}" "3" "3"
mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password_updated"
users=($(get_mysql_users "${mysql_args}"))
# check connection
args=""
set +o errexit
for user in "${users[@]}"; do
  mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -u${user} -p${user}_password_updated"
  pod=mysql-client
  case $user in
    heartbeat | xtrabackup)
      mysql_args="-h localhost -u${user} -p${user}_password_updated"
      pod="$(get_cluster_name)-mysql-0"
      ;;
  esac
  run_mysql "SELECT 1" "${mysql_args}" "${pod}"
  args="${args} --from-literal=${user}=$([ $? -eq 0 ] && echo 'success' || echo 'fail')"
done
set -o errexit
kubectl create configmap -n "${NAMESPACE}" 04-check-connections $args
kubectl get configmap -n "${NAMESPACE}" 04-check-connections -o yaml
# check replication
wait_cluster_consistency_async "${test_name}" "3" "3"
orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0)
cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0])
replicating=$(run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" \
  | tee \
  | jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' \
  | grep "true" \
  | wc -l \
  | sed 's/ *//')
kubectl create configmap -n "${NAMESPACE}" 04-check-replication --from-literal=replicating="${replicating}"]
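The replication check at the end of the script queries orchestrator's HTTP API and counts instances whose replication threads are reported running; ReplicationSQLThreadRuning and ReplicationIOThreadRuning (note the Runing spelling) are the field names as orchestrator serves them and as the script consumes them. The grep/wc pair counts output lines containing "true", so on a healthy 3-node async cluster with two replicas this yields 2. A self-contained illustration of the jq stage with a fabricated one-replica payload:

    # one instance with both threads up -> one "true true" line -> count 1
    echo '[{"ReplicationSQLThreadRuning": true, "ReplicationIOThreadRuning": true}]' \
        | jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' \
        | grep "true" \
        | wc -l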
IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:31:14 | users/4-check-cluster | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 03:31:14 | users/4-check-cluster | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 03:31:14 | users/4-check-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 03:31:14 | users/4-check-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 03:31:14 | users/4-check-cluster | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 03:31:14 | users/4-check-cluster | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 03:31:14 | users/4-check-cluster | ++++ which gdate logger.go:42: 03:31:14 | users/4-check-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-746/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:31:14 | users/4-check-cluster | ++++ which date logger.go:42: 03:31:14 | users/4-check-cluster | +++ date=/usr/bin/date logger.go:42: 03:31:14 | users/4-check-cluster | +++ command -v oc logger.go:42: 03:31:14 | users/4-check-cluster | +++ kubectl get nodes logger.go:42: 03:31:14 | users/4-check-cluster | +++ grep '^minikube' logger.go:42: 03:31:14 | users/4-check-cluster | + sleep 30 logger.go:42: 03:31:44 | users/4-check-cluster | + wait_cluster_consistency_async users 3 3 logger.go:42: 03:31:44 | users/4-check-cluster | + local cluster_name=users logger.go:42: 03:31:44 | users/4-check-cluster | + local cluster_size=3 logger.go:42: 03:31:44 | users/4-check-cluster | + local orc_size=3 logger.go:42: 03:31:44 | users/4-check-cluster | + '[' -z 3 ']' logger.go:42: 03:31:44 | users/4-check-cluster | + sleep 7 logger.go:42: 03:31:51 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:31:52 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:31:52 | users/4-check-cluster | ++ kubectl get ps users 
-n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:31:52 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:31:52 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:31:53 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:31:53 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:31:53 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:31:53 | users/4-check-cluster | + sleep 15 logger.go:42: 03:32:08 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:32:08 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:32:08 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:32:09 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:32:09 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:32:09 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:32:09 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:32:09 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:32:09 | users/4-check-cluster | + sleep 15 logger.go:42: 03:32:24 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:32:24 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:32:24 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:32:25 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:32:25 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:32:25 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:32:25 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:32:25 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:32:25 | users/4-check-cluster | + sleep 15 logger.go:42: 03:32:40 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:32:41 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:32:41 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:32:41 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:32:41 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:32:41 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:32:41 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:32:41 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:32:41 | users/4-check-cluster | + sleep 15 logger.go:42: 03:32:56 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:32:57 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:32:57 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 
03:32:57 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:32:57 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:32:58 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:32:58 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:32:58 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:32:58 | users/4-check-cluster | + sleep 15 logger.go:42: 03:33:13 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:33:13 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:33:13 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:33:14 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:33:14 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:33:14 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:33:14 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:33:14 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:33:14 | users/4-check-cluster | + sleep 15 logger.go:42: 03:33:29 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:33:29 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:33:29 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:33:30 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:33:30 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:33:30 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:33:30 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:33:30 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:33:30 | users/4-check-cluster | + sleep 15 logger.go:42: 03:33:45 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:33:46 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:33:46 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:33:46 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:33:46 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:33:46 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:33:46 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:33:46 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:33:46 | users/4-check-cluster | + sleep 15 logger.go:42: 03:34:01 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:34:02 | users/4-check-cluster | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 03:34:02 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:34:02 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:34:02 | 
users/4-check-cluster | + sleep 15 logger.go:42: 03:34:17 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:34:17 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:34:17 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:34:17 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:34:17 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:34:18 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:34:18 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:34:18 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:34:18 | users/4-check-cluster | + sleep 15 logger.go:42: 03:34:33 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:34:33 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:34:33 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:34:34 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:34:34 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:34:34 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 03:34:34 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 03:34:34 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 03:34:34 | users/4-check-cluster | + sleep 15 logger.go:42: 03:34:49 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:34:49 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:34:49 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:34:50 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:34:50 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:34:50 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:34:50 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.state}' logger.go:42: 03:34:51 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:34:51 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.state}' logger.go:42: 03:34:51 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:34:51 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:34:51 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:34:51 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:34:51 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:34:51 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:34:51 | users/4-check-cluster | + mysql_args='-h users-haproxy -uroot -proot_password_updated' logger.go:42: 03:34:51 | users/4-check-cluster | + users=($(get_mysql_users "${mysql_args}")) logger.go:42: 03:34:51 | users/4-check-cluster | ++ get_mysql_users '-h users-haproxy -uroot 
-proot_password_updated' logger.go:42: 03:34:51 | users/4-check-cluster | ++ local 'args=-h users-haproxy -uroot -proot_password_updated' logger.go:42: 03:34:51 | users/4-check-cluster | ++ run_mysql 'SELECT user FROM mysql.user' '-h users-haproxy -uroot -proot_password_updated' logger.go:42: 03:34:51 | users/4-check-cluster | ++ grep -vE 'mysql|root' logger.go:42: 03:34:51 | users/4-check-cluster | ++ local 'command=SELECT user FROM mysql.user' logger.go:42: 03:34:51 | users/4-check-cluster | ++ local 'uri=-h users-haproxy -uroot -proot_password_updated' logger.go:42: 03:34:51 | users/4-check-cluster | ++ local pod= logger.go:42: 03:34:51 | users/4-check-cluster | +++ get_client_pod logger.go:42: 03:34:51 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:34:52 | users/4-check-cluster | ++ client_pod=mysql-client logger.go:42: 03:34:52 | users/4-check-cluster | ++ wait_pod mysql-client logger.go:42: 03:34:52 | users/4-check-cluster | ++ local pod=mysql-client logger.go:42: 03:34:52 | users/4-check-cluster | ++ set +o xtrace logger.go:42: 03:34:52 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:34:52 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT user FROM mysql.user" | mysql -sN -h users-haproxy -uroot -proot_password_updated' logger.go:42: 03:34:52 | users/4-check-cluster | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 03:34:52 | users/4-check-cluster | ++ sed -e 's/mysql: //' logger.go:42: 03:34:54 | users/4-check-cluster | + args= logger.go:42: 03:34:54 | users/4-check-cluster | + set +o errexit logger.go:42: 03:34:54 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 03:34:54 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:34:54 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:34:54 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:34:54 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:34:54 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:34:54 | users/4-check-cluster | + mysql_args='-h users-haproxy -umonitor -pmonitor_password_updated' logger.go:42: 03:34:54 | users/4-check-cluster | + pod=mysql-client logger.go:42: 03:34:54 | users/4-check-cluster | + case $user in logger.go:42: 03:34:54 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -umonitor -pmonitor_password_updated' mysql-client logger.go:42: 03:34:54 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 03:34:54 | users/4-check-cluster | + local 'uri=-h users-haproxy -umonitor -pmonitor_password_updated' logger.go:42: 03:34:54 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:34:54 | users/4-check-cluster | ++ get_client_pod logger.go:42: 03:34:54 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:34:55 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 03:34:55 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 03:34:55 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:34:55 | users/4-check-cluster | + set +o xtrace logger.go:42: 03:34:55 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:34:55 | 
users/4-check-cluster | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -umonitor -pmonitor_password_updated' logger.go:42: 03:34:55 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 03:34:55 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 03:34:57 | users/4-check-cluster | 1 logger.go:42: 03:34:57 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 03:34:57 | users/4-check-cluster | ++ echo success logger.go:42: 03:34:57 | users/4-check-cluster | + args=' --from-literal=monitor=success' logger.go:42: 03:34:57 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 03:34:57 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:34:57 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:34:58 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:34:58 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:34:58 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:34:58 | users/4-check-cluster | + mysql_args='-h users-haproxy -uoperator -poperator_password_updated' logger.go:42: 03:34:58 | users/4-check-cluster | + pod=mysql-client logger.go:42: 03:34:58 | users/4-check-cluster | + case $user in logger.go:42: 03:34:58 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -uoperator -poperator_password_updated' mysql-client logger.go:42: 03:34:58 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 03:34:58 | users/4-check-cluster | + local 'uri=-h users-haproxy -uoperator -poperator_password_updated' logger.go:42: 03:34:58 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:34:58 | users/4-check-cluster | ++ get_client_pod logger.go:42: 03:34:58 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:34:58 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 03:34:58 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 03:34:58 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:34:58 | users/4-check-cluster | + set +o xtrace logger.go:42: 03:34:58 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:34:58 | users/4-check-cluster | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -uoperator -poperator_password_updated' logger.go:42: 03:34:58 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 03:34:58 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
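The long stretch between 03:31:44 and 03:34:50 above is wait_cluster_consistency_async polling the PerconaServerMySQL resource; each "waiting for cluster readyness (async)" line is one failed probe followed by a 15-second sleep (the orchestrator sat at 2/3 ready for most of the wait). A minimal sketch of that loop, reconstructed from the xtrace output — the real helper lives in e2e-tests/functions and may differ in detail:

    # Reconstructed from the trace: poll the ps resource until MySQL and
    # orchestrator both report fully ready; sleeps and checks match the log.
    wait_cluster_consistency_async() {
        local cluster_name=$1 cluster_size=$2 orc_size=$3
        sleep 7  # give the operator time to pick up the change
        until [[ $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.mysql.state}') == "ready" &&
                 $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.mysql.ready}') == "${cluster_size}" &&
                 $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.orchestrator.ready}') == "${orc_size}" &&
                 $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.orchestrator.state}') == "ready" &&
                 $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.state}') == "ready" ]]; do
            echo 'waiting for cluster readyness (async)'  # spelling as in the trace
            sleep 15
        done
    }

The && chain short-circuits left to right, which is why the trace only queries .status.orchestrator.state and .status.state once .status.orchestrator.ready finally reaches 3.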
logger.go:42: 03:35:00 | users/4-check-cluster | 1 logger.go:42: 03:35:00 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 03:35:00 | users/4-check-cluster | ++ echo success logger.go:42: 03:35:00 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success' logger.go:42: 03:35:00 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 03:35:00 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:35:00 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:00 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:35:00 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:35:00 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:35:00 | users/4-check-cluster | + mysql_args='-h users-haproxy -uorchestrator -porchestrator_password_updated' logger.go:42: 03:35:00 | users/4-check-cluster | + pod=mysql-client logger.go:42: 03:35:00 | users/4-check-cluster | + case $user in logger.go:42: 03:35:00 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -uorchestrator -porchestrator_password_updated' mysql-client logger.go:42: 03:35:00 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 03:35:00 | users/4-check-cluster | + local 'uri=-h users-haproxy -uorchestrator -porchestrator_password_updated' logger.go:42: 03:35:00 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:35:00 | users/4-check-cluster | ++ get_client_pod logger.go:42: 03:35:00 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:35:01 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 03:35:01 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 03:35:01 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:35:01 | users/4-check-cluster | + set +o xtrace logger.go:42: 03:35:01 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:35:01 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 03:35:01 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
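Each of these probes follows the same pattern from the step script printed at 03:31:14: run SELECT 1 as the user with its updated password, then append a success/fail literal keyed on the exit status. Condensed from that script (run_mysql, get_haproxy_svc, and get_cluster_name are helpers from e2e-tests/functions):

    # Condensed from the 4-check-cluster step script: probe every managed
    # user with its updated password and collect the results.
    set +o errexit                         # a failed login must not abort the step
    for user in "${users[@]}"; do
        mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -u${user} -p${user}_password_updated"
        pod=mysql-client
        case $user in
            heartbeat | xtrabackup)        # localhost-only accounts: exec in the mysql pod itself
                mysql_args="-h localhost -u${user} -p${user}_password_updated"
                pod="$(get_cluster_name)-mysql-0"
                ;;
        esac
        run_mysql "SELECT 1" "${mysql_args}" "${pod}"
        args="${args} --from-literal=${user}=$([ $? -eq 0 ] && echo 'success' || echo 'fail')"
    done
    set -o errexit
    kubectl create configmap -n "${NAMESPACE}" 04-check-connections $args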
logger.go:42: 03:35:01 | users/4-check-cluster | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -uorchestrator -porchestrator_password_updated' logger.go:42: 03:35:03 | users/4-check-cluster | 1 logger.go:42: 03:35:03 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 03:35:03 | users/4-check-cluster | ++ echo success logger.go:42: 03:35:03 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success' logger.go:42: 03:35:03 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 03:35:03 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:35:03 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:03 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:35:03 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:35:03 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:35:03 | users/4-check-cluster | + mysql_args='-h users-haproxy -ureplication -preplication_password_updated' logger.go:42: 03:35:03 | users/4-check-cluster | + pod=mysql-client logger.go:42: 03:35:03 | users/4-check-cluster | + case $user in logger.go:42: 03:35:03 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -ureplication -preplication_password_updated' mysql-client logger.go:42: 03:35:03 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 03:35:03 | users/4-check-cluster | + local 'uri=-h users-haproxy -ureplication -preplication_password_updated' logger.go:42: 03:35:03 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:35:03 | users/4-check-cluster | ++ get_client_pod logger.go:42: 03:35:03 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:35:04 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 03:35:04 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 03:35:04 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:35:04 | users/4-check-cluster | + set +o xtrace logger.go:42: 03:35:04 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:35:04 | users/4-check-cluster | + kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -ureplication -preplication_password_updated' logger.go:42: 03:35:04 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 03:35:04 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
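Note the trace ordering just above: for the orchestrator probe the "+ sed" and "+ grep" lines are printed before the "+ kubectl ... exec" line that feeds them. That is normal xtrace behaviour — bash forks all stages of a pipeline at once and each stage emits its trace as it starts, so the printed order says nothing about data flow. A trivial way to reproduce the same effect:

    # Pipeline stages start concurrently; their xtrace lines may interleave
    # in any order even though data still flows left to right.
    bash -xc 'echo "mysql: 1" | sed -e "s/mysql: //" | grep -v insecure'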
logger.go:42: 03:35:05 | users/4-check-cluster | 1 logger.go:42: 03:35:05 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 03:35:05 | users/4-check-cluster | ++ echo success logger.go:42: 03:35:05 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success' logger.go:42: 03:35:05 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 03:35:05 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:35:05 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:06 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:35:06 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:35:06 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:35:06 | users/4-check-cluster | + mysql_args='-h users-haproxy -uheartbeat -pheartbeat_password_updated' logger.go:42: 03:35:06 | users/4-check-cluster | + pod=mysql-client logger.go:42: 03:35:06 | users/4-check-cluster | + case $user in logger.go:42: 03:35:06 | users/4-check-cluster | + mysql_args='-h localhost -uheartbeat -pheartbeat_password_updated' logger.go:42: 03:35:06 | users/4-check-cluster | ++ get_cluster_name logger.go:42: 03:35:06 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:06 | users/4-check-cluster | + pod=users-mysql-0 logger.go:42: 03:35:06 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h localhost -uheartbeat -pheartbeat_password_updated' users-mysql-0 logger.go:42: 03:35:06 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 03:35:06 | users/4-check-cluster | + local 'uri=-h localhost -uheartbeat -pheartbeat_password_updated' logger.go:42: 03:35:06 | users/4-check-cluster | + local pod=users-mysql-0 logger.go:42: 03:35:06 | users/4-check-cluster | ++ get_client_pod logger.go:42: 03:35:06 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:35:07 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 03:35:07 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 03:35:07 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:35:07 | users/4-check-cluster | + set +o xtrace logger.go:42: 03:35:07 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:35:07 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 03:35:07 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 03:35:07 | users/4-check-cluster | + kubectl -n kuttl-test-workable-clam exec users-mysql-0 -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h localhost -uheartbeat -pheartbeat_password_updated' logger.go:42: 03:35:09 | users/4-check-cluster | Defaulted container "mysql" out of: mysql, xtrabackup, pt-heartbeat, mysql-init (init) logger.go:42: 03:35:09 | users/4-check-cluster | 1 logger.go:42: 03:35:09 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 03:35:09 | users/4-check-cluster | ++ echo success logger.go:42: 03:35:09 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success' logger.go:42: 03:35:09 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 03:35:09 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:35:09 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:09 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 03:35:09 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:35:09 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 03:35:09 | users/4-check-cluster | + mysql_args='-h users-haproxy -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 03:35:09 | users/4-check-cluster | + pod=mysql-client logger.go:42: 03:35:09 | users/4-check-cluster | + case $user in logger.go:42: 03:35:09 | users/4-check-cluster | + mysql_args='-h localhost -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 03:35:09 | users/4-check-cluster | ++ get_cluster_name logger.go:42: 03:35:09 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:10 | users/4-check-cluster | + pod=users-mysql-0 logger.go:42: 03:35:10 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h localhost -uxtrabackup -pxtrabackup_password_updated' users-mysql-0 logger.go:42: 03:35:10 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 03:35:10 | users/4-check-cluster | + local 'uri=-h localhost -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 03:35:10 | users/4-check-cluster | + local pod=users-mysql-0 logger.go:42: 03:35:10 | users/4-check-cluster | ++ get_client_pod logger.go:42: 03:35:10 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:35:10 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 03:35:10 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 03:35:10 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 03:35:10 | users/4-check-cluster | + set +o xtrace logger.go:42: 03:35:10 | users/4-check-cluster | mysql-clienttrue logger.go:42: 03:35:10 | users/4-check-cluster | + kubectl -n kuttl-test-workable-clam exec users-mysql-0 -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h localhost -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 03:35:10 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 03:35:10 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
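The 'Defaulted container "mysql"' notice above (and again below for the xtrabackup probe) appears because users-mysql-0 runs several containers (mysql, xtrabackup, pt-heartbeat, plus the mysql-init init container) and the exec does not pass -c, so kubectl falls back to the first container, or to the kubectl.kubernetes.io/default-container annotation when it is set. A hypothetical variant of the traced command that names the container and so avoids the notice:

    # Same probe, with the target container named explicitly to avoid the
    # "Defaulted container" notice.
    kubectl -n kuttl-test-workable-clam exec users-mysql-0 -c mysql -- \
        bash -c 'printf "%s\n" "SELECT 1" | mysql -sN -h localhost -uxtrabackup -pxtrabackup_password_updated'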
logger.go:42: 03:35:12 | users/4-check-cluster | Defaulted container "mysql" out of: mysql, xtrabackup, pt-heartbeat, mysql-init (init) logger.go:42: 03:35:12 | users/4-check-cluster | 1 logger.go:42: 03:35:12 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 03:35:12 | users/4-check-cluster | ++ echo success logger.go:42: 03:35:12 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=xtrabackup=success' logger.go:42: 03:35:12 | users/4-check-cluster | + set -o errexit logger.go:42: 03:35:12 | users/4-check-cluster | + kubectl create configmap -n kuttl-test-workable-clam 04-check-connections --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=xtrabackup=success logger.go:42: 03:35:13 | users/4-check-cluster | configmap/04-check-connections created logger.go:42: 03:35:13 | users/4-check-cluster | + kubectl get configmap -n kuttl-test-workable-clam 04-check-connections -o yaml logger.go:42: 03:35:13 | users/4-check-cluster | apiVersion: v1 logger.go:42: 03:35:13 | users/4-check-cluster | data: logger.go:42: 03:35:13 | users/4-check-cluster | heartbeat: success logger.go:42: 03:35:13 | users/4-check-cluster | monitor: success logger.go:42: 03:35:13 | users/4-check-cluster | operator: success logger.go:42: 03:35:13 | users/4-check-cluster | orchestrator: success logger.go:42: 03:35:13 | users/4-check-cluster | replication: success logger.go:42: 03:35:13 | users/4-check-cluster | xtrabackup: success logger.go:42: 03:35:13 | users/4-check-cluster | kind: ConfigMap logger.go:42: 03:35:13 | users/4-check-cluster | metadata: logger.go:42: 03:35:13 | users/4-check-cluster | creationTimestamp: "2024-09-18T03:35:13Z" logger.go:42: 03:35:13 | users/4-check-cluster | name: 04-check-connections logger.go:42: 03:35:13 | users/4-check-cluster | namespace: kuttl-test-workable-clam logger.go:42: 03:35:13 | users/4-check-cluster | resourceVersion: "45614" logger.go:42: 03:35:13 | users/4-check-cluster | uid: a5ea522d-3056-4de0-885b-5f862fc22eaa logger.go:42: 03:35:13 | users/4-check-cluster | + wait_cluster_consistency_async users 3 3 logger.go:42: 03:35:13 | users/4-check-cluster | + local cluster_name=users logger.go:42: 03:35:13 | users/4-check-cluster | + local cluster_size=3 logger.go:42: 03:35:13 | users/4-check-cluster | + local orc_size=3 logger.go:42: 03:35:13 | users/4-check-cluster | + '[' -z 3 ']' logger.go:42: 03:35:13 | users/4-check-cluster | + sleep 7 logger.go:42: 03:35:20 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.state}' logger.go:42: 03:35:20 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:35:20 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.mysql.ready}' logger.go:42: 03:35:21 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:35:21 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 03:35:21 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 03:35:21 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.orchestrator.state}' logger.go:42: 03:35:22 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 
03:35:22 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-workable-clam -o 'jsonpath={.status.state}' logger.go:42: 03:35:22 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 03:35:22 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 03:35:22 | users/4-check-cluster | +++ kubectl -n kuttl-test-workable-clam get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:35:22 | users/4-check-cluster | ++ get_orc_headless_fqdn users 0 logger.go:42: 03:35:22 | users/4-check-cluster | ++ local cluster=users logger.go:42: 03:35:22 | users/4-check-cluster | ++ local index=0 logger.go:42: 03:35:22 | users/4-check-cluster | ++ echo users-orc-0.users-orc logger.go:42: 03:35:22 | users/4-check-cluster | + orc_host=users-orc-0.users-orc logger.go:42: 03:35:22 | users/4-check-cluster | ++ run_curl http://users-orc-0.users-orc:3000/api/clusters/ logger.go:42: 03:35:22 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'curl -s -k http://users-orc-0.users-orc:3000/api/clusters/' logger.go:42: 03:35:22 | users/4-check-cluster | ++ jq -r '.[0]' logger.go:42: 03:35:24 | users/4-check-cluster | + cluster=users-mysql-0.users-mysql.kuttl-test-workable-clam:3306 logger.go:42: 03:35:24 | users/4-check-cluster | ++ run_curl http://users-orc-0.users-orc:3000/api/cluster/users-mysql-0.users-mysql.kuttl-test-workable-clam:3306/ logger.go:42: 03:35:24 | users/4-check-cluster | ++ tee logger.go:42: 03:35:24 | users/4-check-cluster | ++ kubectl -n kuttl-test-workable-clam exec mysql-client -- bash -c 'curl -s -k http://users-orc-0.users-orc:3000/api/cluster/users-mysql-0.users-mysql.kuttl-test-workable-clam:3306/' logger.go:42: 03:35:24 | users/4-check-cluster | ++ jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' logger.go:42: 03:35:24 | users/4-check-cluster | ++ wc -l logger.go:42: 03:35:24 | users/4-check-cluster | ++ sed 's/ *//' logger.go:42: 03:35:24 | users/4-check-cluster | ++ grep true logger.go:42: 03:35:25 | users/4-check-cluster | + replicating=2 logger.go:42: 03:35:25 | users/4-check-cluster | + kubectl create configmap -n kuttl-test-workable-clam 04-check-replication --from-literal=replicating=2 logger.go:42: 03:35:26 | users/4-check-cluster | configmap/04-check-replication created logger.go:42: 03:35:28 | users/4-check-cluster | test step completed 4-check-cluster logger.go:42: 03:35:28 | users/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 03:35:28 | users/98-drop-finalizer | PerconaServerMySQL:kuttl-test-workable-clam/users updated logger.go:42: 03:35:28 | users/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 03:35:28 | users/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ realpath ../../.. 
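The replication check that closed out step 4 above asks orchestrator (on users-orc-0, port 3000) for the cluster alias, then counts instances whose SQL and I/O replication threads are running. The oddly spelled JSON fields ReplicationSQLThreadRuning / ReplicationIOThreadRuning match what orchestrator's API itself emits, so the jq filter has to use them as-is. Condensed from the step script, with this run's values in the comments:

    # Condensed from the step script: count replicating members via the
    # orchestrator API (run_curl/get_orc_headless_fqdn come from ../../functions).
    orc_host=$(get_orc_headless_fqdn "$(get_cluster_name)" 0)       # users-orc-0.users-orc
    cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r '.[0]')
    replicating=$(run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" \
        | jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' \
        | grep "true" \
        | wc -l \
        | sed 's/ *//')   # trims wc padding; 2 here, presumably the two replicas, since the source runs no replication threads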
logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/tests/users logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++ test_name=users logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/vars.sh logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-746 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/deploy logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-746/e2e-tests/conf logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/users logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/users logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-746 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-746 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export VERSION=PR-746-e3b0b614 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ VERSION=PR-746-e3b0b614 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-746-e3b0b614 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-746/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | ++++ which date logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ command -v oc logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 03:35:29 | users/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 03:35:30 | users/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 03:35:30 | users/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 03:35:30 | users/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 03:35:30 | users/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
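destroy_operator finishes the run by force-deleting the operator deployment and then the ps-operator namespace; --force --grace-period=0 tells the API server to remove the objects without waiting for the kubelet to confirm termination, which is exactly what the two "Immediate deletion does not wait ..." warnings above flag. The traced teardown, in short:

    # As traced: forced teardown skips graceful termination, so kubectl's
    # immediate-deletion warning is expected here.
    kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    kubectl delete namespace ps-operator --force --grace-period=0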
logger.go:42: 03:35:30 | users/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 03:35:36 | users/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 03:35:36 | users | users events from ns kuttl-test-workable-clam: logger.go:42: 03:35:36 | users | 2024-09-18 03:27:26 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-workable-clam/mysql-client to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-hvgv default-scheduler logger.go:42: 03:35:36 | users | 2024-09-18 03:27:27 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:27 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:27 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-workable-clam/datadir-users-mysql-0" pd.csi.storage.gke.io_gke-183649811e6640fb921d-5370-da14-vm_a80ecd7d-8d3c-4131-b91e-d7f66ea1fb3b logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-0 Pod users-mysql-0 in StatefulSet users-mysql success statefulset-controller logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-0 in StatefulSet users-mysql successful statefulset-controller logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal Pod users-orc-0 Scheduled Successfully assigned kuttl-test-workable-clam/users-orc-0 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-hvgv default-scheduler logger.go:42: 03:35:36 | users | 2024-09-18 03:27:38 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-0 in StatefulSet users-orc successful statefulset-controller logger.go:42: 03:35:36 | users | 2024-09-18 03:27:39 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:39 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 129ms (129ms including waiting) kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:39 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:39 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:40 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 134ms (135ms including waiting) kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Created Created container orc kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 92ms (92ms including waiting) kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:41 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:35:36 | users | 2024-09-18 03:27:43 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-a7be716b-5656-42c5-b2d9-3611d5625a28 pd.csi.storage.gke.io_gke-183649811e6640fb921d-5370-da14-vm_a80ecd7d-8d3c-4131-b91e-d7f66ea1fb3b logger.go:42: 03:35:36 | users | 2024-09-18 03:27:44 +0000 UTC Normal Pod 
users-mysql-0 Scheduled Successfully assigned kuttl-test-workable-clam/users-mysql-0 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-4cdw default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:52 +0000 UTC Normal Pod users-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a7be716b-5656-42c5-b2d9-3611d5625a28" attachdetach-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:55 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:55 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 116ms (116ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:55 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:55 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:57 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:57 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 124ms (124ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:57 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:57 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:57 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:57 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 126ms (126ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:58 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:58 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:58 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:58 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 107ms (107ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:58 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:27:58 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:14 +0000 UTC Normal Pod users-orc-1 Scheduled Successfully assigned kuttl-test-workable-clam/users-orc-1 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-krsc default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:14 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:14 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 123ms (123ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:14 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:14 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:14 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-1 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 106ms (106ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 105ms (105ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:16 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:29 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:29 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-1 Pod users-mysql-1 in StatefulSet users-mysql success statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:30 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:30 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-workable-clam/datadir-users-mysql-1" pd.csi.storage.gke.io_gke-183649811e6640fb921d-5370-da14-vm_a80ecd7d-8d3c-4131-b91e-d7f66ea1fb3b
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:30 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-1 in StatefulSet users-mysql successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:33 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-bd1e00df-9df1-4ad3-98e0-73d2a521acf8 pd.csi.storage.gke.io_gke-183649811e6640fb921d-5370-da14-vm_a80ecd7d-8d3c-4131-b91e-d7f66ea1fb3b
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:34 +0000 UTC Normal Pod users-mysql-1 Scheduled Successfully assigned kuttl-test-workable-clam/users-mysql-1 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-hvgv default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:35 +0000 UTC Normal Pod users-haproxy-0 Scheduled Successfully assigned kuttl-test-workable-clam/users-haproxy-0 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-4cdw default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:35 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:35 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 128ms (128ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:35 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:35 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:35 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-0 in StatefulSet users-haproxy successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 121ms (121ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 100ms (100ms including waiting) kubelet
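[The WaitForFirstConsumer, ExternalProvisioning, Provisioning, and ProvisioningSucceeded events above are the normal lifecycle of a claim on a storage class whose volumeBindingMode is WaitForFirstConsumer: binding is deferred until the pod is scheduled, and the GKE PD CSI driver then provisions the disk in that pod's zone. A minimal sketch for verifying this against a similar cluster, using the namespace from this run; both commands are plain kubectl:

    # Inspect the claim's event stream
    kubectl -n kuttl-test-workable-clam describe pvc datadir-users-mysql-1
    # Confirm the storage class delays binding until a consumer exists
    kubectl get storageclass -o custom-columns=NAME:.metadata.name,BINDING:.volumeBindingMode
]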
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:37 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:38 +0000 UTC Normal Pod users-haproxy-1 Scheduled Successfully assigned kuttl-test-workable-clam/users-haproxy-1 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-krsc default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:38 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-1 in StatefulSet users-haproxy successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:40 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:40 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 131ms (131ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:40 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:40 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:41 +0000 UTC Normal Pod users-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-bd1e00df-9df1-4ad3-98e0-73d2a521acf8" attachdetach-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 117ms (117ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 85ms (85ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:42 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:43 +0000 UTC Normal Pod users-haproxy-2 Scheduled Successfully assigned kuttl-test-workable-clam/users-haproxy-2 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-hvgv default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:43 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-2 in StatefulSet users-haproxy successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:43 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 124ms (124ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:43 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:43 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:44 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:44 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 335ms (335ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:44 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:44 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 653ms (653ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 114ms (114ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:45 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 116ms (116ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 139ms (139ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 138ms (138ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:46 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:49 +0000 UTC Normal Pod users-orc-2 Scheduled Successfully assigned kuttl-test-workable-clam/users-orc-2 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-4cdw default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:49 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-2 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:50 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:50 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 113ms (113ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:50 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:50 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 123ms (123ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 109ms (109ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:28:52 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:03 +0000 UTC Warning Pod users-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2024/09/18 03:29:02 Peers: [3438613535323330.users-mysql-unready.kuttl-test-workable-clam 3734643066633664.users-mysql-unready.kuttl-test-workable-clam] 2024/09/18 03:29:02 FQDN: users-mysql-1.users-mysql.kuttl-test-workable-clam 2024/09/18 03:29:02 Primary: users-mysql-0.users-mysql.kuttl-test-workable-clam Replicas: [users-mysql-1.users-mysql.kuttl-test-workable-clam] 2024/09/18 03:29:02 lookup users-mysql-1 [10.241.184.49] 2024/09/18 03:29:02 PodIP: 10.241.184.49 2024/09/18 03:29:02 lookup users-mysql-0.users-mysql.kuttl-test-workable-clam [10.241.185.36] 2024/09/18 03:29:02 PrimaryIP: 10.241.185.36 2024/09/18 03:29:02 Donor: users-mysql-0.users-mysql.kuttl-test-workable-clam 2024/09/18 03:29:02 Opening connection to 10.241.184.49 2024/09/18 03:29:02 Clone required: true 2024/09/18 03:29:02 Checking if a clone in progress 2024/09/18 03:29:02 Clone in progress: false 2024/09/18 03:29:02 Cloning from users-mysql-0.users-mysql.kuttl-test-workable-clam 2024/09/18 03:29:03 Clone finished. Restarting container... kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:03 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:07 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 97ms (97ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:37 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:37 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-2 Pod users-mysql-2 in StatefulSet users-mysql success statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:37 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-2 in StatefulSet users-mysql successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:38 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:38 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-workable-clam/datadir-users-mysql-2" pd.csi.storage.gke.io_gke-183649811e6640fb921d-5370-da14-vm_a80ecd7d-8d3c-4131-b91e-d7f66ea1fb3b
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:41 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-579b228c-0d81-4e88-ad2a-ba5333c68dc7 pd.csi.storage.gke.io_gke-183649811e6640fb921d-5370-da14-vm_a80ecd7d-8d3c-4131-b91e-d7f66ea1fb3b
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:42 +0000 UTC Normal Pod users-mysql-2 Scheduled Successfully assigned kuttl-test-workable-clam/users-mysql-2 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-krsc default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:49 +0000 UTC Normal Pod users-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-579b228c-0d81-4e88-ad2a-ba5333c68dc7" attachdetach-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:51 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:51 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 136ms (136ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:51 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:51 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 106ms (106ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 119ms (119ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:53 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:54 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 94ms (94ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:54 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:29:54 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:30:12 +0000 UTC Warning Pod users-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2024/09/18 03:30:11 Peers: [3438613535323330.users-mysql-unready.kuttl-test-workable-clam 3734643066633664.users-mysql-unready.kuttl-test-workable-clam 6233646634373734.users-mysql-unready.kuttl-test-workable-clam] 2024/09/18 03:30:11 FQDN: users-mysql-2.users-mysql.kuttl-test-workable-clam 2024/09/18 03:30:11 Primary: users-mysql-0.users-mysql.kuttl-test-workable-clam Replicas: [users-mysql-1.users-mysql.kuttl-test-workable-clam users-mysql-2.users-mysql.kuttl-test-workable-clam] 2024/09/18 03:30:11 lookup users-mysql-2 [10.241.186.37] 2024/09/18 03:30:11 PodIP: 10.241.186.37 2024/09/18 03:30:11 lookup users-mysql-0.users-mysql.kuttl-test-workable-clam [10.241.185.36] 2024/09/18 03:30:11 PrimaryIP: 10.241.185.36 2024/09/18 03:30:11 Donor: users-mysql-1.users-mysql.kuttl-test-workable-clam 2024/09/18 03:30:11 Opening connection to 10.241.186.37 2024/09/18 03:30:11 Clone required: true 2024/09/18 03:30:11 Checking if a clone in progress 2024/09/18 03:30:11 Clone in progress: false 2024/09/18 03:30:11 Cloning from users-mysql-1.users-mysql.kuttl-test-workable-clam 2024/09/18 03:30:12 Clone finished. Restarting container... kubelet
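[users-mysql-2 repeats the same clone-and-restart bootstrap, but note the donor in the probe output is users-mysql-1 rather than the primary, which spreads the clone load across existing replicas. Because a StatefulSet brings pods up one ordinal at a time, a minimal way to wait for the whole bring-up when reproducing is the standard rollout command:

    # Blocks until all replicas of the MySQL StatefulSet are ready
    kubectl -n kuttl-test-workable-clam rollout status statefulset/users-mysql
]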
logger.go:42: 03:35:36 | users | 2024-09-18 03:30:12 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:30:15 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 129ms (129ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:13 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:13 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-2 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:14 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:45 +0000 UTC Normal Pod users-orc-2 Scheduled Successfully assigned kuttl-test-workable-clam/users-orc-2 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-4cdw default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:45 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:45 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 132ms (132ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:45 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:45 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:47 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 183ms (183ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 139ms (139ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:31:48 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:20 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:20 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:20 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-1 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:51 +0000 UTC Normal Pod users-orc-1 Scheduled Successfully assigned kuttl-test-workable-clam/users-orc-1 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-krsc default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:51 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:51 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 98ms (98ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:51 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:51 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:53 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 105ms (105ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 113ms (113ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:32:54 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:26 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:26 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:26 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-0 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:56 +0000 UTC Warning PerconaServerMySQL.ps.percona.com users AsyncReplicationNotReady orchestrator: empty response ps-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:57 +0000 UTC Normal Pod users-orc-0 Scheduled Successfully assigned kuttl-test-workable-clam/users-orc-0 to gke-jen-ps-746-e3b0b614--default-pool-8b49d265-hvgv default-scheduler
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:58 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:58 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-746-e3b0b614" in 109ms (109ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:58 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:58 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:33:59 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 101ms (101ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 102ms (102ms including waiting) kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:00 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:34:35 +0000 UTC Warning PerconaServerMySQL.ps.percona.com users AsyncReplicationNotReady orchestrator: unable to determine cluster name ps-controller
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:29 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:29 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:29 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:29 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:29 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Killing Stopping container orc kubelet
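[The block above is a rolling restart of the orchestrator StatefulSet in reverse ordinal order (orc-2, then orc-1, then orc-0), which is standard StatefulSet update behavior; the transient AsyncReplicationNotReady warnings ("empty response", "unable to determine cluster name") come from the ps-controller querying orchestrator while its pods are cycling and clear once a pod is back. A minimal sketch for checking orchestrator directly when reproducing; port 3000 is orchestrator's default HTTP port and curl being present in the orc container are both assumptions here:

    # Query orchestrator's cluster view from inside an orchestrator pod (port and curl availability assumed)
    kubectl -n kuttl-test-workable-clam exec users-orc-0 -c orc -- \
        curl -s http://localhost:3000/api/clusters
]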
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:29 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:30 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:31 +0000 UTC Warning Pod users-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/09/18 03:35:31 readiness check failed: connect to db: ping DB: dial tcp 10.241.186.37:33062: connect: connection refused kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:32 +0000 UTC Warning Pod users-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/09/18 03:35:32 readiness check failed: connect to db: ping DB: dial tcp 10.241.184.49:33062: connect: connection refused kubelet
logger.go:42: 03:35:36 | users | 2024-09-18 03:35:34 +0000 UTC Warning Pod users-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/09/18 03:35:34 readiness check failed: connect to db: ping DB: dial tcp 10.241.185.36:33062: connect: connection refused kubelet
logger.go:42: 03:35:36 | users | Deleting namespace: kuttl-test-workable-clam
=== NAME kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (541.11s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/users (540.65s)
PASS
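[The readiness-probe "connection refused" warnings just before teardown are expected: the test is deleting its namespace, so mysqld is shutting down while the kubelet is still probing port 33062. To re-run only this test against your own cluster, a minimal sketch using the suite path the harness reported; the kubectl-kuttl plugin must be installed, and running from the repository root is an assumption:

    # Re-run just the users test with the same per-step timeout the harness used
    kubectl kuttl test e2e-tests/tests --test users --timeout 180
]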