=== RUN kuttl
harness.go:459: starting setup
harness.go:254: running tests using configured kubeconfig.
harness.go:277: Successful connection to cluster at: https://34.27.34.19
harness.go:362: running tests
harness.go:74: going to run test suite with timeout of 180 seconds for each step
harness.go:374: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/users
=== PAUSE kuttl/harness/users
=== CONT kuttl/harness/users
logger.go:42: 19:38:27 | users | Creating namespace: kuttl-test-genuine-newt
logger.go:42: 19:38:27 | users/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 19:38:27 | users/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 19:38:27 | users/0-deploy-operator | + source ../../functions
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ realpath ../../..
logger.go:42: 19:38:27 | users/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:38:27 | users/0-deploy-operator | ++++ pwd
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/tests/users
logger.go:42: 19:38:27 | users/0-deploy-operator | ++ test_name=users
logger.go:42: 19:38:27 | users/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/vars.sh
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:38:27 | users/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export GIT_BRANCH=PR-920
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ GIT_BRANCH=PR-920
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export VERSION=PR-920-3aaba39c
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ VERSION=PR-920-3aaba39c
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:38:27 | users/0-deploy-operator | ++++ which gdate
logger.go:42: 19:38:27 | users/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-920/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:38:27 | users/0-deploy-operator | ++++ which date
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ oc get projects
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ :
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 19:38:27 | users/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 19:38:27 | users/0-deploy-operator | ++ oc get projects
logger.go:42: 19:38:27 | users/0-deploy-operator | + init_temp_dir
logger.go:42: 19:38:27 | users/0-deploy-operator | + rm -rf /tmp/kuttl/ps/users
logger.go:42: 19:38:27 | users/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/users
logger.go:42: 19:38:27 | users/0-deploy-operator | + deploy_operator
logger.go:42: 19:38:27 | users/0-deploy-operator | + destroy_operator
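deploy_operator begins by tearing down any operator left over from a previous run, so the NotFound errors and the `+ true` lines that follow are expected: every delete is allowed to fail. Stripped of the xtrace noise, the teardown-and-recreate pattern recorded below reduces to this sketch (namespace and deployment names as in this run):

    # Idempotent teardown: tolerate missing resources, then recreate the namespace.
    kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 || true
    kubectl delete namespace ps-operator --force --grace-period=0 || true
    kubectl delete namespace ps-operator --ignore-not-found
    kubectl wait --for=delete namespace ps-operator
    kubectl create namespace ps-operator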
logger.go:42: 19:38:27 | users/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 19:38:28 | users/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:38:28 | users/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 19:38:28 | users/0-deploy-operator | + true
logger.go:42: 19:38:28 | users/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 19:38:28 | users/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 19:38:28 | users/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:38:28 | users/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 19:38:28 | users/0-deploy-operator | + true
logger.go:42: 19:38:28 | users/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 19:38:28 | users/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 19:38:28 | users/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 19:38:28 | users/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 19:38:28 | users/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 19:38:29 | users/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 19:38:29 | users/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 19:38:29 | users/0-deploy-operator | namespace/ps-operator created
logger.go:42: 19:38:30 | users/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy/crd.yaml
logger.go:42: 19:38:30 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 19:38:31 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 19:38:32 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 19:38:32 | users/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 19:38:32 | users/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy/cw-rbac.yaml
logger.go:42: 19:38:33 | users/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 19:38:33 | users/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 19:38:33 | users/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 19:38:33 | users/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 19:38:34 | users/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 19:38:34 | users/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 19:38:34 | users/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 19:38:34 | users/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 19:38:34 | users/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:38:34 | users/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-920-3aaba39c"' /mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy/cw-operator.yaml
logger.go:42: 19:38:35 | users/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 19:38:35 | users/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
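The three yq invocations above rewrite the second YAML document in cw-operator.yaml (documentIndex==1, the Deployment) before it ever reaches kubectl: the manager image is pinned to the PR build, LOG_LEVEL is forced to DEBUG, and DISABLE_TELEMETRY is set to true. Untangled from the interleaved xtrace, the pipeline looks roughly like this (a sketch using the file path and image tag recorded in this run; plain yq v4 syntax otherwise):

    # Patch the Deployment document of a multi-doc manifest, then apply it.
    IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
    yq eval "select(documentIndex==1).spec.template.spec.containers[0].image=\"${IMAGE}\"" deploy/cw-operator.yaml \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
      | kubectl -n ps-operator apply -f -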
logger.go:42: 19:38:35 | users/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 19:38:35 | users/0-deploy-operator | + kubectl -n kuttl-test-genuine-newt apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf/secrets.yaml
logger.go:42: 19:38:36 | users/0-deploy-operator | secret/test-secrets created
logger.go:42: 19:38:36 | users/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 19:38:36 | users/0-deploy-operator | + kubectl -n kuttl-test-genuine-newt apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 19:38:37 | users/0-deploy-operator | secret/test-ssl created
logger.go:42: 19:38:37 | users/0-deploy-operator | + deploy_client
logger.go:42: 19:38:37 | users/0-deploy-operator | + kubectl -n kuttl-test-genuine-newt apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf/client.yaml
logger.go:42: 19:38:38 | users/0-deploy-operator | pod/mysql-client created
logger.go:42: 19:38:39 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:38:39 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:38:39 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 19:38:40 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:38:41 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:38:41 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 19:38:42 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:38:42 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:38:43 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 19:38:44 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:38:44 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:38:45 | users/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 19:38:46 | users/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:38:46 | users/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:38:46 | users/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 19:38:46 | users/0-deploy-operator | NAME NAMESPACE COL0
logger.go:42: 19:38:46 | users/0-deploy-operator | percona-server-mysql-operator ps-operator 1
logger.go:42: 19:38:46 | users/0-deploy-operator | ASSERT PASS
logger.go:42: 19:38:46 | users/0-deploy-operator | test step completed 0-deploy-operator
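Five runs of kubectl assert over roughly seven seconds before ASSERT PASS is normal here: kuttl keeps re-executing a step's assert commands until they succeed or the 180-second step timeout (set in the harness header above) expires. The same readiness gate can be expressed without the kubectl-assert plugin; a rough equivalent, with the deployment name and namespace variable taken from the log and the loop itself only a sketch:

    # Poll until the operator Deployment reports one ready replica.
    until [ "$(kubectl -n "${OPERATOR_NS:-$NAMESPACE}" get deployment percona-server-mysql-operator \
          -o jsonpath='{.status.readyReplicas}' 2>/dev/null)" = "1" ]; do
      echo "waiting for the operator deployment to become ready"
      sleep 2
    done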
logger.go:42: 19:38:46 | users/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 19:38:46 | users/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=3' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 19:38:46 | users/1-create-cluster | + source ../../functions
logger.go:42: 19:38:46 | users/1-create-cluster | +++ realpath ../../..
logger.go:42: 19:38:46 | users/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:38:46 | users/1-create-cluster | ++++ pwd
logger.go:42: 19:38:46 | users/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/tests/users
logger.go:42: 19:38:46 | users/1-create-cluster | ++ test_name=users
logger.go:42: 19:38:46 | users/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/vars.sh
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:38:46 | users/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:38:46 | users/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:38:46 | users/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:38:46 | users/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:38:46 | users/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:38:46 | users/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export GIT_BRANCH=PR-920
logger.go:42: 19:38:46 | users/1-create-cluster | +++ GIT_BRANCH=PR-920
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export VERSION=PR-920-3aaba39c
logger.go:42: 19:38:46 | users/1-create-cluster | +++ VERSION=PR-920-3aaba39c
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:38:46 | users/1-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:38:46 | users/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:38:46 | users/1-create-cluster | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 19:38:46 | users/1-create-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 19:38:46 | users/1-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:38:46 | users/1-create-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:38:46 | users/1-create-cluster | ++++ which gdate
logger.go:42: 19:38:46 | users/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-920/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:38:46 | users/1-create-cluster | ++++ which date
logger.go:42: 19:38:46 | users/1-create-cluster | +++ date=/usr/bin/date
logger.go:42: 19:38:46 | users/1-create-cluster | +++ oc get projects
logger.go:42: 19:38:46 | users/1-create-cluster | +++ :
logger.go:42: 19:38:46 | users/1-create-cluster | +++ kubectl get nodes
logger.go:42: 19:38:46 | users/1-create-cluster | +++ grep '^minikube'
logger.go:42: 19:38:47 | users/1-create-cluster | ++ oc get projects
logger.go:42: 19:38:47 | users/1-create-cluster | + get_cr
logger.go:42: 19:38:47 | users/1-create-cluster | + local name_suffix=
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.metadata.name="%s"' users
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.metadata.name="users"' /mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy/cr.yaml
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + '[' -n '' ']'
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval -
logger.go:42: 19:38:47 | users/1-create-cluster | + kubectl -n kuttl-test-genuine-newt apply -f -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-920-3aaba39c"' -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 19:38:47 | users/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:38:47 | users/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 19:38:48 | users/1-create-cluster | perconaservermysql.ps.percona.com/users created
logger.go:42: 19:41:57 | users/1-create-cluster | test step completed 1-create-cluster
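get_cr is essentially deploy/cr.yaml pushed through a chain of yq edits (name, secret names, image overrides from vars.sh), with the test step piping the result through its own overrides before the final kubectl apply; the jumbled xtrace above is each stage of that pipeline starting up concurrently. Collapsed into a single expression, the step's net effect is roughly this (values as recorded in this run; the per-component .spec.*.image overrides are elided):

    yq eval '
      .metadata.name = "users" |
      .spec.secretsName = "test-secrets" |
      .spec.sslSecretName = "test-ssl" |
      .spec.upgradeOptions.apply = "disabled" |
      .spec.mysql.clusterType = "async" |
      .spec.mysql.size = 3 |
      .spec.proxy.haproxy.enabled = true |
      .spec.proxy.haproxy.size = 3 |
      .spec.orchestrator.enabled = true |
      .spec.orchestrator.size = 3
    ' deploy/cr.yaml | kubectl -n kuttl-test-genuine-newt apply -f -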
logger.go:42: 19:41:57 | users/2-check-users | starting test step 2-check-users
logger.go:42: 19:41:57 | users/2-check-users | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
users=($(get_mysql_users "${mysql_args}"))
args=''
for user in "${users[@]}"; do
  host="%"
  case $user in
    heartbeat | xtrabackup)
      host="localhost"
      ;;
  esac
  query="SHOW GRANTS FOR '${user}'@'${host}';"
  run_mysql "${query}" "${mysql_args}" \
    | sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//" \
    >"${TEMP_DIR}/${user}.sql"
  args="${args} --from-file=${user}=${TEMP_DIR}/${user}.sql"
done
kubectl create configmap -n "${NAMESPACE}" 02-check-users $args
kubectl get configmap -n "${NAMESPACE}" 02-check-users -o yaml]
logger.go:42: 19:41:57 | users/2-check-users | + source ../../functions
logger.go:42: 19:41:57 | users/2-check-users | +++ realpath ../../..
logger.go:42: 19:41:57 | users/2-check-users | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:41:57 | users/2-check-users | ++++ pwd
logger.go:42: 19:41:57 | users/2-check-users | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/tests/users
logger.go:42: 19:41:57 | users/2-check-users | ++ test_name=users
logger.go:42: 19:41:57 | users/2-check-users | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/vars.sh
logger.go:42: 19:41:57 | users/2-check-users | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:41:57 | users/2-check-users | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:41:57 | users/2-check-users | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:41:57 | users/2-check-users | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:41:57 | users/2-check-users | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:41:57 | users/2-check-users | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:41:57 | users/2-check-users | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:41:57 | users/2-check-users | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:41:57 | users/2-check-users | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:41:57 | users/2-check-users | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:41:57 | users/2-check-users | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:41:57 | users/2-check-users | +++ export GIT_BRANCH=PR-920
logger.go:42: 19:41:57 | users/2-check-users | +++ GIT_BRANCH=PR-920
logger.go:42: 19:41:57 | users/2-check-users | +++ export VERSION=PR-920-3aaba39c
logger.go:42: 19:41:57 | users/2-check-users | +++ VERSION=PR-920-3aaba39c
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:41:57 | users/2-check-users | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:41:57 | users/2-check-users | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:41:57 | users/2-check-users | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:41:57 | users/2-check-users | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:41:57 | users/2-check-users | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:41:57 | users/2-check-users | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:41:57 | users/2-check-users | +++ export MINIO_VER=5.4.0
logger.go:42: 19:41:57 | users/2-check-users | +++ MINIO_VER=5.4.0
logger.go:42: 19:41:57 | users/2-check-users | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:41:57 | users/2-check-users | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:41:57 | users/2-check-users | ++++ which gdate
logger.go:42: 19:41:57 | users/2-check-users | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-920/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:41:57 | users/2-check-users | ++++ which date
logger.go:42: 19:41:57 | users/2-check-users | +++ date=/usr/bin/date
logger.go:42: 19:41:57 | users/2-check-users | +++ oc get projects
logger.go:42: 19:41:57 | users/2-check-users | +++ :
logger.go:42: 19:41:57 | users/2-check-users | +++ kubectl get nodes
logger.go:42: 19:41:57 | users/2-check-users | +++ grep '^minikube'
logger.go:42: 19:41:57 | users/2-check-users | ++ oc get projects
logger.go:42: 19:41:57 | users/2-check-users | +++ get_cluster_name
logger.go:42: 19:41:57 | users/2-check-users | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:41:58 | users/2-check-users | ++ get_haproxy_svc users
logger.go:42: 19:41:58 | users/2-check-users | ++ local cluster=users
logger.go:42: 19:41:58 | users/2-check-users | ++ echo users-haproxy
logger.go:42: 19:41:58 | users/2-check-users | + mysql_args='-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:58 | users/2-check-users | + users=($(get_mysql_users "${mysql_args}"))
logger.go:42: 19:41:58 | users/2-check-users | ++ get_mysql_users '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:58 | users/2-check-users | ++ local 'args=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:58 | users/2-check-users | ++ run_mysql 'SELECT user FROM mysql.user' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:58 | users/2-check-users | ++ grep -vE 'mysql|root'
logger.go:42: 19:41:58 | users/2-check-users | ++ local 'command=SELECT user FROM mysql.user'
logger.go:42: 19:41:58 | users/2-check-users | ++ local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:58 | users/2-check-users | ++ local pod=
logger.go:42: 19:41:58 | users/2-check-users | +++ get_client_pod
logger.go:42: 19:41:58 | users/2-check-users | +++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:41:58 | users/2-check-users | ++ client_pod=mysql-client
logger.go:42: 19:41:58 | users/2-check-users | ++ wait_pod mysql-client
logger.go:42: 19:41:58 | users/2-check-users | ++ local pod=mysql-client
logger.go:42: 19:41:58 | users/2-check-users | ++ set +o xtrace
logger.go:42: 19:41:59 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:41:59 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT user FROM mysql.user" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:59 | users/2-check-users | ++ sed -e 's/mysql: //'
logger.go:42: 19:41:59 | users/2-check-users | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:41:59 | users/2-check-users | + args=
logger.go:42: 19:41:59 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:41:59 | users/2-check-users | + host=%
logger.go:42: 19:41:59 | users/2-check-users | + case $user in
logger.go:42: 19:41:59 | users/2-check-users | + query='SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';'
logger.go:42: 19:41:59 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:59 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';'
logger.go:42: 19:41:59 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:41:59 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:41:59 | users/2-check-users | + local pod=
logger.go:42: 19:41:59 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:41:59 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:00 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:00 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:00 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:00 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:00 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:00 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:00 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:00 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:01 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql'
logger.go:42: 19:42:01 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:42:01 | users/2-check-users | + host=%
logger.go:42: 19:42:01 | users/2-check-users | + case $user in
logger.go:42: 19:42:01 | users/2-check-users | + query='SHOW GRANTS FOR '\''operator'\''@'\''%'\'';'
logger.go:42: 19:42:01 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:42:01 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''operator'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:01 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''operator'\''@'\''%'\'';'
logger.go:42: 19:42:01 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:01 | users/2-check-users | + local pod=
logger.go:42: 19:42:01 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:42:01 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:01 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:01 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:01 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:01 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:02 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:02 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:02 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''operator'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:02 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:03 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql'
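Every iteration in this step follows the same shape: build a SHOW GRANTS query for the user, run it through the mysql-client pod, strip the insecure-password warning and any host-specific grant targets with sed, write the result to ${TEMP_DIR}/<user>.sql, and append a --from-file flag to args. A compact sketch of the pattern, with run_mysql standing in for the test suite's helper of the same name and the user list as observed in this run:

    # Collect SHOW GRANTS output per user into files, accumulating
    # `kubectl create configmap` arguments as we go.
    args=''
    for user in monitor operator orchestrator replication heartbeat percona.telemetry xtrabackup; do
      host='%'
      case $user in
        heartbeat | xtrabackup) host='localhost' ;;   # these accounts exist only locally
      esac
      run_mysql "SHOW GRANTS FOR '${user}'@'${host}';" "-h users-haproxy -uroot -proot_password" \
        | sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//" \
        > "${TEMP_DIR}/${user}.sql"
      args="${args} --from-file=${user}=${TEMP_DIR}/${user}.sql"
    done

The sed expression removes grants scoped to pod IPs (10.x/192.x) or *.internal hostnames, so the stored files stay stable across runs.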
logger.go:42: 19:42:03 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:42:03 | users/2-check-users | + host=%
logger.go:42: 19:42:03 | users/2-check-users | + case $user in
logger.go:42: 19:42:03 | users/2-check-users | + query='SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';'
logger.go:42: 19:42:03 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:42:03 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:03 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';'
logger.go:42: 19:42:03 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:03 | users/2-check-users | + local pod=
logger.go:42: 19:42:03 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:42:03 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:03 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:03 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:03 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:03 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:03 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:03 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:03 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:03 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:04 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql'
logger.go:42: 19:42:04 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:42:04 | users/2-check-users | + host=%
logger.go:42: 19:42:04 | users/2-check-users | + case $user in
logger.go:42: 19:42:04 | users/2-check-users | + query='SHOW GRANTS FOR '\''replication'\''@'\''%'\'';'
logger.go:42: 19:42:04 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''replication'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:04 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''replication'\''@'\''%'\'';'
logger.go:42: 19:42:04 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:04 | users/2-check-users | + local pod=
logger.go:42: 19:42:04 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:42:04 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:42:04 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:05 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:05 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:05 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:05 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:05 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:05 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''replication'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:05 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:05 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:06 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql'
logger.go:42: 19:42:06 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:42:06 | users/2-check-users | + host=%
logger.go:42: 19:42:06 | users/2-check-users | + case $user in
logger.go:42: 19:42:06 | users/2-check-users | + host=localhost
logger.go:42: 19:42:06 | users/2-check-users | + query='SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';'
logger.go:42: 19:42:06 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:06 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:42:06 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';'
logger.go:42: 19:42:06 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:06 | users/2-check-users | + local pod=
logger.go:42: 19:42:06 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:42:06 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:06 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:06 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:06 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:06 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:07 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:07 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:07 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:07 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:08 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql'
logger.go:42: 19:42:08 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:42:08 | users/2-check-users | + host=%
logger.go:42: 19:42:08 | users/2-check-users | + case $user in
logger.go:42: 19:42:08 | users/2-check-users | + query='SHOW GRANTS FOR '\''percona.telemetry'\''@'\''%'\'';'
logger.go:42: 19:42:08 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''percona.telemetry'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:08 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''percona.telemetry'\''@'\''%'\'';'
logger.go:42: 19:42:08 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:08 | users/2-check-users | + local pod=
logger.go:42: 19:42:08 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:42:08 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:42:08 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:08 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:08 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:08 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:08 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:09 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:09 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:09 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:09 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''percona.telemetry'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:09 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql --from-file=percona.telemetry=/tmp/kuttl/ps/users/percona.telemetry.sql'
logger.go:42: 19:42:09 | users/2-check-users | + for user in '"${users[@]}"'
logger.go:42: 19:42:09 | users/2-check-users | + host=%
logger.go:42: 19:42:09 | users/2-check-users | + case $user in
logger.go:42: 19:42:09 | users/2-check-users | + host=localhost
logger.go:42: 19:42:09 | users/2-check-users | + query='SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';'
logger.go:42: 19:42:09 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//'
logger.go:42: 19:42:09 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';' '-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:09 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';'
logger.go:42: 19:42:09 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:09 | users/2-check-users | + local pod=
logger.go:42: 19:42:09 | users/2-check-users | ++ get_client_pod
logger.go:42: 19:42:09 | users/2-check-users | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:42:10 | users/2-check-users | + client_pod=mysql-client
logger.go:42: 19:42:10 | users/2-check-users | + wait_pod mysql-client
logger.go:42: 19:42:10 | users/2-check-users | + local pod=mysql-client
logger.go:42: 19:42:10 | users/2-check-users | + set +o xtrace
logger.go:42: 19:42:10 | users/2-check-users | mysql-clienttrue
logger.go:42: 19:42:10 | users/2-check-users | + sed -e 's/mysql: //'
logger.go:42: 19:42:10 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:42:10 | users/2-check-users | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';" | mysql -sN -h users-haproxy -uroot -proot_password'
logger.go:42: 19:42:11 | users/2-check-users | + args=' --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql --from-file=percona.telemetry=/tmp/kuttl/ps/users/percona.telemetry.sql --from-file=xtrabackup=/tmp/kuttl/ps/users/xtrabackup.sql'
logger.go:42: 19:42:11 | users/2-check-users | + kubectl create configmap -n kuttl-test-genuine-newt 02-check-users --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql --from-file=operator=/tmp/kuttl/ps/users/operator.sql --from-file=orchestrator=/tmp/kuttl/ps/users/orchestrator.sql --from-file=replication=/tmp/kuttl/ps/users/replication.sql --from-file=heartbeat=/tmp/kuttl/ps/users/heartbeat.sql --from-file=percona.telemetry=/tmp/kuttl/ps/users/percona.telemetry.sql --from-file=xtrabackup=/tmp/kuttl/ps/users/xtrabackup.sql
logger.go:42: 19:42:12 | users/2-check-users | configmap/02-check-users created
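A small shell detail worth noting in the step script: $args is deliberately left unquoted in `kubectl create configmap -n "${NAMESPACE}" 02-check-users $args`, so each accumulated " --from-file=..." token word-splits into a separate flag. That works here because the file paths contain no spaces; a sketch of the array-based idiom that avoids the caveat entirely:

    # Safer variant: accumulate flags in an array instead of a flat string.
    args=()
    args+=( --from-file=monitor=/tmp/kuttl/ps/users/monitor.sql )
    # ... one entry per user ...
    kubectl create configmap -n "${NAMESPACE}" 02-check-users "${args[@]}"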
logger.go:42: 19:42:12 | users/2-check-users | + kubectl get configmap -n kuttl-test-genuine-newt 02-check-users -o yaml
logger.go:42: 19:42:12 | users/2-check-users | apiVersion: v1
logger.go:42: 19:42:12 | users/2-check-users | data:
logger.go:42: 19:42:12 | users/2-check-users |   heartbeat: |
logger.go:42: 19:42:12 | users/2-check-users |     GRANT REPLICATION CLIENT ON *.* TO `heartbeat`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SYSTEM_USER ON *.* TO `heartbeat`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ON `sys_operator`.`heartbeat` TO `heartbeat`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |   monitor: |
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT, RELOAD, PROCESS, SUPER, REPLICATION CLIENT ON *.* TO `monitor`@`%`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT BACKUP_ADMIN,SERVICE_CONNECTION_ADMIN,SYSTEM_USER ON *.* TO `monitor`@`%`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `performance_schema`.* TO `monitor`@`%`
logger.go:42: 19:42:12 | users/2-check-users |   operator: |
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `operator`@`%` WITH GRANT OPTION
logger.go:42: 19:42:12 | users/2-check-users |     GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ABORT_EXEMPT,AUDIT_ADMIN,AUTHENTICATION_POLICY_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,ENCRYPTION_KEY_ADMIN,FIREWALL_EXEMPT,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PASSWORDLESS_USER_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SENSITIVE_VARIABLES_OBSERVER,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,TELEMETRY_LOG_ADMIN,XA_RECOVER_ADMIN ON *.* TO `operator`@`%` WITH GRANT OPTION
logger.go:42: 19:42:12 | users/2-check-users |   orchestrator: |
logger.go:42: 19:42:12 | users/2-check-users |     GRANT RELOAD, PROCESS, SUPER, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO `orchestrator`@`%`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SYSTEM_USER ON *.* TO `orchestrator`@`%`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `sys_operator`.* TO `orchestrator`@`%`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `mysql`.`slave_master_info` TO `orchestrator`@`%`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`replication_group_members` TO `orchestrator`@`%`
logger.go:42: 19:42:12 | users/2-check-users |   percona.telemetry: |
logger.go:42: 19:42:12 | users/2-check-users |     ERROR 1141 (42000) at line 1: There is no such grant defined for user 'percona.telemetry' on host '%'
logger.go:42: 19:42:12 | users/2-check-users |     command terminated with exit code 1
logger.go:42: 19:42:12 | users/2-check-users |   replication: |
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT, RELOAD, SHUTDOWN, PROCESS, FILE, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE USER ON *.* TO `replication`@`%` WITH GRANT OPTION
logger.go:42: 19:42:12 | users/2-check-users |     GRANT BACKUP_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,ROLE_ADMIN,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN ON *.* TO `replication`@`%` WITH GRANT OPTION
logger.go:42: 19:42:12 | users/2-check-users |     GRANT INSERT, UPDATE, DELETE ON `mysql`.* TO `replication`@`%` WITH GRANT OPTION
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`threads` TO `replication`@`%`
logger.go:42: 19:42:12 | users/2-check-users |   xtrabackup: |
logger.go:42: 19:42:12 | users/2-check-users |     GRANT RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT ON *.* TO `xtrabackup`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT BACKUP_ADMIN,GROUP_REPLICATION_ADMIN,REPLICATION_SLAVE_ADMIN,SYSTEM_USER ON *.* TO `xtrabackup`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`keyring_component_status` TO `xtrabackup`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`log_status` TO `xtrabackup`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users |     GRANT SELECT ON `performance_schema`.`replication_group_members` TO `xtrabackup`@`localhost`
logger.go:42: 19:42:12 | users/2-check-users | kind: ConfigMap
logger.go:42: 19:42:12 | users/2-check-users | metadata:
logger.go:42: 19:42:12 | users/2-check-users |   creationTimestamp: "2025-06-04T19:42:11Z"
logger.go:42: 19:42:12 | users/2-check-users |   name: 02-check-users
logger.go:42: 19:42:12 | users/2-check-users |   namespace: kuttl-test-genuine-newt
logger.go:42: 19:42:12 | users/2-check-users |   resourceVersion: "1749066131967279009"
logger.go:42: 19:42:12 | users/2-check-users |   uid: d937c6a6-95e7-4a5a-b097-50b3e9f379ca
logger.go:42: 19:42:12 | users/2-check-users | test step completed 2-check-users
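Note how the percona.telemetry key of the dumped ConfigMap contains an ERROR 1141 message rather than grants: the SHOW GRANTS for that user failed, yet the step still completed. That is consistent with plain `set -o errexit` semantics, since run_mysql sits on the left side of a pipeline and only the pipeline's last command (sed) decides its exit status; the error text simply lands in the .sql file, where the step's assert can still match it. A minimal illustration of the behavior, assuming bash without pipefail:

    set -e
    false | sed 's/^/captured: /'   # pipeline status comes from sed, so execution continues
    echo "still running"
    # Adding `set -o pipefail` would make the pipeline, and hence the step, fail instead.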
logger.go:42: 19:42:12 | users/3-update-passwords | starting test step 3-update-passwords
logger.go:42: 19:42:13 | users/3-update-passwords | Secret:kuttl-test-genuine-newt/test-secrets updated
logger.go:42: 19:42:23 | users/3-update-passwords | test step completed 3-update-passwords
logger.go:42: 19:42:23 | users/4-check-cluster | starting test step 4-check-cluster
logger.go:42: 19:42:23 | users/4-check-cluster | running command: [sh -c set -o pipefail
set -o errexit
set -o xtrace
source ../../functions
sleep 30 # wait for cluster status to change to initializing
wait_cluster_consistency_async "${test_name}" "3" "3"
mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password_updated"
users=($(get_mysql_users "${mysql_args}"))
# check connection
args=""
set +o errexit
for user in "${users[@]}"; do
  mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -u${user} -p${user}_password_updated"
  pod=mysql-client
  case $user in
    heartbeat | xtrabackup)
      mysql_args="-h localhost -u${user} -p${user}_password_updated"
      pod="$(get_cluster_name)-mysql-0"
      ;;
  esac
  run_mysql "SELECT 1" "${mysql_args}" "${pod}"
  args="${args} --from-literal=${user}=$([ $? -eq 0 ] && echo 'success' || echo 'fail')"
done
set -o errexit
kubectl create configmap -n "${NAMESPACE}" 04-check-connections $args
kubectl get configmap -n "${NAMESPACE}" 04-check-connections -o yaml
# check replication
wait_cluster_consistency_async "${test_name}" "3" "3"
orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0)
cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0])
replicating=$(run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" \
  | tee \
  | jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' \
  | grep "true" \
  | wc -l \
  | sed 's/ *//')
kubectl create configmap -n "${NAMESPACE}" 04-check-replication --from-literal=replicating="${replicating}"]
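wait_cluster_consistency_async, whose xtrace follows, polls the PerconaServerMySQL custom resource until MySQL reports ready at the expected size and orchestrator catches up. Judging from the jsonpath probes recorded in this run, its core is a loop along these lines (a sketch reconstructed from the observed trace, not the helper's verbatim source; the echoed "readyness" spelling matches the log):

    # Poll cluster status until MySQL is ready at full size and
    # orchestrator reports the expected number of ready instances.
    wait_cluster_consistency_async() {
      local cluster_name=$1 cluster_size=$2 orc_size=$3
      sleep 7   # give the status a moment to move off the previous state
      until [[ "$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.state}')" == "ready" &&
               "$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.ready}')" == "${cluster_size}" &&
               "$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.orchestrator.ready}')" == "${orc_size}" ]]; do
        echo 'waiting for cluster readyness (async)'
        sleep 15
      done
    }

The later replication check in the step script counts "true" values of orchestrator's ReplicationSQLThreadRuning/ReplicationIOThreadRuning fields (spelled that way in the orchestrator API) across the cluster and records the count in the 04-check-replication ConfigMap.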
logger.go:42: 19:42:23 | users/4-check-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:42:23 | users/4-check-cluster | ++++ pwd
logger.go:42: 19:42:23 | users/4-check-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/tests/users
logger.go:42: 19:42:23 | users/4-check-cluster | ++ test_name=users
logger.go:42: 19:42:23 | users/4-check-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/vars.sh
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:42:23 | users/4-check-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:42:23 | users/4-check-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:42:23 | users/4-check-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:42:23 | users/4-check-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:42:23 | users/4-check-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:42:23 | users/4-check-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export GIT_BRANCH=PR-920
logger.go:42: 19:42:23 | users/4-check-cluster | +++ GIT_BRANCH=PR-920
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export VERSION=PR-920-3aaba39c
logger.go:42: 19:42:23 | users/4-check-cluster | +++ VERSION=PR-920-3aaba39c
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:42:23 | users/4-check-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:42:23 | users/4-check-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:42:23 | users/4-check-cluster | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 19:42:23 | users/4-check-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 19:42:23 | users/4-check-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:42:23 | users/4-check-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:42:23 | users/4-check-cluster | ++++ which gdate
logger.go:42: 19:42:23 | users/4-check-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-920/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:42:23 | users/4-check-cluster | ++++ which date
logger.go:42: 19:42:23 | users/4-check-cluster | +++ date=/usr/bin/date
logger.go:42: 19:42:23 | users/4-check-cluster | +++ oc get projects
logger.go:42: 19:42:23 | users/4-check-cluster | +++ :
logger.go:42: 19:42:23 | users/4-check-cluster | +++ kubectl get nodes
logger.go:42: 19:42:23 | users/4-check-cluster | +++ grep '^minikube'
logger.go:42: 19:42:24 | users/4-check-cluster | ++ oc get projects
logger.go:42: 19:42:24 | users/4-check-cluster | + sleep 30
logger.go:42: 19:42:54 | users/4-check-cluster | + wait_cluster_consistency_async users 3 3
logger.go:42: 19:42:54 | users/4-check-cluster | + local cluster_name=users
logger.go:42: 19:42:54 | users/4-check-cluster | + local cluster_size=3
logger.go:42: 19:42:54 | users/4-check-cluster | + local orc_size=3
logger.go:42: 19:42:54 | users/4-check-cluster | + '[' -z 3 ']'
logger.go:42: 19:42:54 | users/4-check-cluster | + sleep 7
logger.go:42: 19:43:01 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:43:01 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:43:01 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:43:02 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:43:02 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:43:02 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:43:02 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:43:02 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:43:02 | users/4-check-cluster | + sleep 15
logger.go:42: 19:43:17 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:43:17 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:43:17 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:43:18 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:43:18 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:43:18 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:43:18 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:43:18 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:43:18 | users/4-check-cluster | + sleep 15
logger.go:42: 19:43:33 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:43:34 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:43:34 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:43:34 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:43:34 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:43:34 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:43:34 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:43:34 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:43:34 | users/4-check-cluster | + sleep 15
logger.go:42: 19:43:49 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:43:50 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:43:50 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:43:50 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:43:50 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:43:51 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:43:51 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:43:51 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:43:51 | users/4-check-cluster | + sleep 15
logger.go:42: 19:44:06 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:44:06 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:44:06 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:44:06 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:44:06 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:44:07 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:44:07 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:44:07 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:44:07 | users/4-check-cluster | + sleep 15
logger.go:42: 19:44:22 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:44:22 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:44:22 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:44:23 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:44:23 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:44:23 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:44:23 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:44:23 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:44:23 | users/4-check-cluster | + sleep 15
logger.go:42: 19:44:38 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:44:38 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:44:38 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:44:39 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:44:39 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:44:39 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:44:39 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:44:39 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:44:39 | users/4-check-cluster | + sleep 15
logger.go:42: 19:44:54 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:44:55 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:44:55 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:44:55 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:44:55 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:44:55 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:44:55 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:44:55 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:44:55 | users/4-check-cluster | + sleep 15
logger.go:42: 19:45:10 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:45:11 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:45:11 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:45:11 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:45:11 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:45:11 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:45:11 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:45:11 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:45:11 | users/4-check-cluster | + sleep 15
logger.go:42: 19:45:26 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:45:27 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:45:27 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:45:27 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:45:27 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:45:28 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:45:28 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:45:28 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:45:28 | users/4-check-cluster | + sleep 15
logger.go:42: 19:45:43 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:45:43 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:45:43 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:45:44 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:45:44 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:45:44 | users/4-check-cluster | + [[ 2 == \3 ]]
logger.go:42: 19:45:44 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)'
logger.go:42: 19:45:44 | users/4-check-cluster | waiting for cluster readyness (async)
logger.go:42: 19:45:44 | users/4-check-cluster | + sleep 15
logger.go:42: 19:45:59 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:45:59 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:45:59 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:46:00 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:46:00 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:46:00 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:46:00 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.state}'
logger.go:42: 19:46:01 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:46:01 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.state}'
logger.go:42: 19:46:01 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:46:01 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:01 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:01 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:01 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:01 | users/4-check-cluster | + mysql_args='-h users-haproxy -uroot -proot_password_updated'
logger.go:42: 19:46:01 | users/4-check-cluster | + users=($(get_mysql_users "${mysql_args}"))
logger.go:42: 19:46:01 | users/4-check-cluster | ++ get_mysql_users '-h users-haproxy -uroot -proot_password_updated'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ local 'args=-h users-haproxy -uroot -proot_password_updated'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ run_mysql 'SELECT user FROM mysql.user' '-h users-haproxy -uroot -proot_password_updated'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ grep -vE 'mysql|root'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ local 'command=SELECT user FROM mysql.user'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ local 'uri=-h users-haproxy -uroot -proot_password_updated'
logger.go:42: 19:46:01 | users/4-check-cluster | ++ local pod=
logger.go:42: 19:46:01 | users/4-check-cluster | +++ get_client_pod
logger.go:42: 19:46:01 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:02 | users/4-check-cluster | ++ client_pod=mysql-client
logger.go:42: 19:46:02 | users/4-check-cluster | ++ wait_pod mysql-client
logger.go:42: 19:46:02 | users/4-check-cluster | ++ local pod=mysql-client
logger.go:42: 19:46:02 | users/4-check-cluster | ++ set +o xtrace
logger.go:42: 19:46:02 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:02 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT user FROM mysql.user" | mysql -sN -h users-haproxy -uroot -proot_password_updated'
logger.go:42: 19:46:02 | users/4-check-cluster | ++ sed -e 's/mysql: //'
logger.go:42: 19:46:02 | users/4-check-cluster | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:03 | users/4-check-cluster | + args=
logger.go:42: 19:46:03 | users/4-check-cluster | + set +o errexit
logger.go:42: 19:46:03 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:03 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:03 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:03 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:03 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:03 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:03 | users/4-check-cluster | + mysql_args='-h users-haproxy -umonitor -pmonitor_password_updated'
logger.go:42: 19:46:03 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:03 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:03 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -umonitor -pmonitor_password_updated' mysql-client
logger.go:42: 19:46:03 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:03 | users/4-check-cluster | + local 'uri=-h users-haproxy -umonitor -pmonitor_password_updated'
logger.go:42: 19:46:03 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:03 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:03 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:04 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:04 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:04 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:04 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:04 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:04 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -umonitor -pmonitor_password_updated'
logger.go:42: 19:46:04 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:04 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:05 | users/4-check-cluster | 1
logger.go:42: 19:46:05 | users/4-check-cluster | ++ '[' 0 -eq 0 ']'
logger.go:42: 19:46:05 | users/4-check-cluster | ++ echo success
logger.go:42: 19:46:05 | users/4-check-cluster | + args=' --from-literal=monitor=success'
logger.go:42: 19:46:05 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:05 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:05 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:06 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:06 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:06 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:06 | users/4-check-cluster | + mysql_args='-h users-haproxy -uoperator -poperator_password_updated'
logger.go:42: 19:46:06 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:06 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:06 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -uoperator -poperator_password_updated' mysql-client
logger.go:42: 19:46:06 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:06 | users/4-check-cluster | + local 'uri=-h users-haproxy -uoperator -poperator_password_updated'
logger.go:42: 19:46:06 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:06 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:06 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:06 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:06 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:06 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:06 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:06 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:06 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:06 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:06 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -uoperator -poperator_password_updated'
logger.go:42: 19:46:07 | users/4-check-cluster | 1
logger.go:42: 19:46:07 | users/4-check-cluster | ++ '[' 0 -eq 0 ']'
logger.go:42: 19:46:07 | users/4-check-cluster | ++ echo success
logger.go:42: 19:46:07 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success'
logger.go:42: 19:46:07 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:07 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:07 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:08 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:08 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:08 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:08 | users/4-check-cluster | + mysql_args='-h users-haproxy -uorchestrator -porchestrator_password_updated'
logger.go:42: 19:46:08 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:08 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:08 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -uorchestrator -porchestrator_password_updated' mysql-client
logger.go:42: 19:46:08 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:08 | users/4-check-cluster | + local 'uri=-h users-haproxy -uorchestrator -porchestrator_password_updated'
logger.go:42: 19:46:08 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:08 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:08 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:08 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:08 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:08 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:08 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:09 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:09 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -uorchestrator -porchestrator_password_updated'
logger.go:42: 19:46:09 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:09 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:09 | users/4-check-cluster | 1
logger.go:42: 19:46:09 | users/4-check-cluster | ++ '[' 0 -eq 0 ']'
logger.go:42: 19:46:09 | users/4-check-cluster | ++ echo success
logger.go:42: 19:46:09 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success'
logger.go:42: 19:46:09 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:09 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:09 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:10 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:10 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:10 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:10 | users/4-check-cluster | + mysql_args='-h users-haproxy -ureplication -preplication_password_updated'
logger.go:42: 19:46:10 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:10 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:10 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -ureplication -preplication_password_updated' mysql-client
logger.go:42: 19:46:10 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:10 | users/4-check-cluster | + local 'uri=-h users-haproxy -ureplication -preplication_password_updated'
logger.go:42: 19:46:10 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:10 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:10 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:10 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:10 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:10 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:10 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:11 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:11 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -ureplication -preplication_password_updated'
logger.go:42: 19:46:11 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:11 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:11 | users/4-check-cluster | 1
logger.go:42: 19:46:11 | users/4-check-cluster | ++ '[' 0 -eq 0 ']'
logger.go:42: 19:46:11 | users/4-check-cluster | ++ echo success
logger.go:42: 19:46:11 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success'
logger.go:42: 19:46:11 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:11 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:11 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:12 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:12 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:12 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:12 | users/4-check-cluster | + mysql_args='-h users-haproxy -uheartbeat -pheartbeat_password_updated'
logger.go:42: 19:46:12 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:12 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:12 | users/4-check-cluster | + mysql_args='-h localhost -uheartbeat -pheartbeat_password_updated'
logger.go:42: 19:46:12 | users/4-check-cluster | ++ get_cluster_name
logger.go:42: 19:46:12 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:12 | users/4-check-cluster | + pod=users-mysql-0
logger.go:42: 19:46:12 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h localhost -uheartbeat -pheartbeat_password_updated' users-mysql-0
logger.go:42: 19:46:12 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:12 | users/4-check-cluster | + local 'uri=-h localhost -uheartbeat -pheartbeat_password_updated'
logger.go:42: 19:46:12 | users/4-check-cluster | + local pod=users-mysql-0
logger.go:42: 19:46:12 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:12 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:13 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:13 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:13 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:13 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:13 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:13 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:13 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:13 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec users-mysql-0 -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h localhost -uheartbeat -pheartbeat_password_updated'
logger.go:42: 19:46:14 | users/4-check-cluster | Defaulted container "mysql" out of: mysql, xtrabackup, pt-heartbeat, mysql-init (init)
logger.go:42: 19:46:14 | users/4-check-cluster | 1
logger.go:42: 19:46:14 | users/4-check-cluster | ++ '[' 0 -eq 0 ']'
logger.go:42: 19:46:14 | users/4-check-cluster | ++ echo success
logger.go:42: 19:46:14 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success'
logger.go:42: 19:46:14 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:14 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:14 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:14 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:14 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:14 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:14 | users/4-check-cluster | + mysql_args='-h users-haproxy -upercona.telemetry -ppercona.telemetry_password_updated'
logger.go:42: 19:46:14 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:14 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:14 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -upercona.telemetry -ppercona.telemetry_password_updated' mysql-client
logger.go:42: 19:46:14 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:14 | users/4-check-cluster | + local 'uri=-h users-haproxy -upercona.telemetry -ppercona.telemetry_password_updated'
logger.go:42: 19:46:14 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:14 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:14 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:15 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:15 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:15 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:15 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:15 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:15 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:15 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:15 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -upercona.telemetry -ppercona.telemetry_password_updated'
logger.go:42: 19:46:16 | users/4-check-cluster | ERROR 1045 (28000): Access denied for user 'percona.telemetry'@'10.175.224.32' (using password: YES)
logger.go:42: 19:46:16 | users/4-check-cluster | command terminated with exit code 1
logger.go:42: 19:46:16 | users/4-check-cluster | ++ '[' 1 -eq 0 ']'
logger.go:42: 19:46:16 | users/4-check-cluster | ++ echo fail
logger.go:42: 19:46:16 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=percona.telemetry=fail'
logger.go:42: 19:46:16 | users/4-check-cluster | + for user in '"${users[@]}"'
logger.go:42: 19:46:16 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:16 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:16 | users/4-check-cluster | ++ get_haproxy_svc users
logger.go:42: 19:46:16 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:16 | users/4-check-cluster | ++ echo users-haproxy
logger.go:42: 19:46:16 | users/4-check-cluster | + mysql_args='-h users-haproxy -uxtrabackup -pxtrabackup_password_updated'
logger.go:42: 19:46:16 | users/4-check-cluster | + pod=mysql-client
logger.go:42: 19:46:16 | users/4-check-cluster | + case $user in
logger.go:42: 19:46:16 | users/4-check-cluster | + mysql_args='-h localhost -uxtrabackup -pxtrabackup_password_updated'
logger.go:42: 19:46:16 | users/4-check-cluster | ++ get_cluster_name
logger.go:42: 19:46:16 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:17 | users/4-check-cluster | + pod=users-mysql-0
logger.go:42: 19:46:17 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h localhost -uxtrabackup -pxtrabackup_password_updated' users-mysql-0
logger.go:42: 19:46:17 | users/4-check-cluster | + local 'command=SELECT 1'
logger.go:42: 19:46:17 | users/4-check-cluster | + local 'uri=-h localhost -uxtrabackup -pxtrabackup_password_updated'
logger.go:42: 19:46:17 | users/4-check-cluster | + local pod=users-mysql-0
logger.go:42: 19:46:17 | users/4-check-cluster | ++ get_client_pod
logger.go:42: 19:46:17 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:46:17 | users/4-check-cluster | + client_pod=mysql-client
logger.go:42: 19:46:17 | users/4-check-cluster | + wait_pod mysql-client
logger.go:42: 19:46:17 | users/4-check-cluster | + local pod=mysql-client
logger.go:42: 19:46:17 | users/4-check-cluster | + set +o xtrace
logger.go:42: 19:46:18 | users/4-check-cluster | mysql-clienttrue
logger.go:42: 19:46:18 | users/4-check-cluster | + kubectl -n kuttl-test-genuine-newt exec users-mysql-0 -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h localhost -uxtrabackup -pxtrabackup_password_updated'
logger.go:42: 19:46:18 | users/4-check-cluster | + sed -e 's/mysql: //'
logger.go:42: 19:46:18 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:46:19 | users/4-check-cluster | Defaulted container "mysql" out of: mysql, xtrabackup, pt-heartbeat, mysql-init (init)
logger.go:42: 19:46:19 | users/4-check-cluster | 1
logger.go:42: 19:46:19 | users/4-check-cluster | ++ '[' 0 -eq 0 ']'
logger.go:42: 19:46:19 | users/4-check-cluster | ++ echo success
logger.go:42: 19:46:19 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=percona.telemetry=fail --from-literal=xtrabackup=success'
logger.go:42: 19:46:19 | users/4-check-cluster | + set -o errexit
logger.go:42: 19:46:19 | users/4-check-cluster | + kubectl create configmap -n kuttl-test-genuine-newt 04-check-connections --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=percona.telemetry=fail --from-literal=xtrabackup=success
logger.go:42: 19:46:19 | users/4-check-cluster | configmap/04-check-connections created
logger.go:42: 19:46:19 | users/4-check-cluster | + kubectl get configmap -n kuttl-test-genuine-newt 04-check-connections -o yaml
logger.go:42: 19:46:19 | users/4-check-cluster | apiVersion: v1
logger.go:42: 19:46:19 | users/4-check-cluster | data:
logger.go:42: 19:46:19 | users/4-check-cluster | heartbeat: success
logger.go:42: 19:46:19 | users/4-check-cluster | monitor: success
logger.go:42: 19:46:19 | users/4-check-cluster | operator: success
logger.go:42: 19:46:19 | users/4-check-cluster | orchestrator: success
logger.go:42: 19:46:19 | users/4-check-cluster | percona.telemetry: fail
logger.go:42: 19:46:19 | users/4-check-cluster | replication: success
logger.go:42: 19:46:19 | users/4-check-cluster | xtrabackup: success
logger.go:42: 19:46:19 | users/4-check-cluster | kind: ConfigMap
logger.go:42: 19:46:19 | users/4-check-cluster | metadata:
logger.go:42: 19:46:19 | users/4-check-cluster | creationTimestamp: "2025-06-04T19:46:19Z"
logger.go:42: 19:46:19 | users/4-check-cluster | name: 04-check-connections
logger.go:42: 19:46:19 | users/4-check-cluster | namespace: kuttl-test-genuine-newt
logger.go:42: 19:46:19 | users/4-check-cluster | resourceVersion: "1749066379385343019"
logger.go:42: 19:46:19 | users/4-check-cluster | uid: a8243eb4-42c4-4707-a26f-ca73bfdbf828
logger.go:42: 19:46:19 | users/4-check-cluster | + wait_cluster_consistency_async users 3 3
logger.go:42: 19:46:19 | users/4-check-cluster | + local cluster_name=users
logger.go:42: 19:46:19 | users/4-check-cluster | + local cluster_size=3
logger.go:42: 19:46:19 | users/4-check-cluster | + local orc_size=3
logger.go:42: 19:46:19 | users/4-check-cluster | + '[' -z 3 ']'
logger.go:42: 19:46:19 | users/4-check-cluster | + sleep 7
logger.go:42: 19:46:26 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.state}'
logger.go:42: 19:46:27 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:46:27 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 19:46:27 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:46:27 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 19:46:28 | users/4-check-cluster | + [[ 3 == \3 ]]
logger.go:42: 19:46:28 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.orchestrator.state}'
logger.go:42: 19:46:28 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:46:28 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-genuine-newt -o 'jsonpath={.status.state}'
logger.go:42: 19:46:28 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 19:46:28 | users/4-check-cluster | +++ get_cluster_name
logger.go:42: 19:46:28 | users/4-check-cluster | +++ kubectl -n kuttl-test-genuine-newt get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:46:29 | users/4-check-cluster | ++ get_orc_headless_fqdn users 0
logger.go:42: 19:46:29 | users/4-check-cluster | ++ local cluster=users
logger.go:42: 19:46:29 | users/4-check-cluster | ++ local index=0
logger.go:42: 19:46:29 | users/4-check-cluster | ++ echo users-orc-0.users-orc
logger.go:42: 19:46:29 | users/4-check-cluster | + orc_host=users-orc-0.users-orc
logger.go:42: 19:46:29 | users/4-check-cluster | ++ run_curl http://users-orc-0.users-orc:3000/api/clusters/
logger.go:42: 19:46:29 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'curl -s -k http://users-orc-0.users-orc:3000/api/clusters/'
logger.go:42: 19:46:29 | users/4-check-cluster | ++ jq -r '.[0]'
logger.go:42: 19:46:29 | users/4-check-cluster | + cluster=users-mysql-0.users-mysql.kuttl-test-genuine-newt:3306
logger.go:42: 19:46:29 | users/4-check-cluster | ++ run_curl http://users-orc-0.users-orc:3000/api/cluster/users-mysql-0.users-mysql.kuttl-test-genuine-newt:3306/
logger.go:42: 19:46:29 | users/4-check-cluster | ++ tee
logger.go:42: 19:46:29 | users/4-check-cluster | ++ kubectl -n kuttl-test-genuine-newt exec mysql-client -- bash -c 'curl -s -k http://users-orc-0.users-orc:3000/api/cluster/users-mysql-0.users-mysql.kuttl-test-genuine-newt:3306/'
logger.go:42: 19:46:29 | users/4-check-cluster | ++ grep true
logger.go:42: 19:46:29 | users/4-check-cluster | ++ jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"'
logger.go:42: 19:46:29 | users/4-check-cluster | ++ wc -l
logger.go:42: 19:46:29 | users/4-check-cluster | ++ sed 's/ *//'
logger.go:42: 19:46:30 | users/4-check-cluster | + replicating=2
logger.go:42: 19:46:30 | users/4-check-cluster | + kubectl create configmap -n kuttl-test-genuine-newt 04-check-replication --from-literal=replicating=2
logger.go:42: 19:46:31 | users/4-check-cluster | configmap/04-check-replication created
logger.go:42: 19:46:32 | users/4-check-cluster | test step completed 4-check-cluster
logger.go:42: 19:46:32 | users/98-drop-finalizer | starting test step 98-drop-finalizer
logger.go:42: 19:46:33 | users/98-drop-finalizer | PerconaServerMySQL:kuttl-test-genuine-newt/users updated
logger.go:42: 19:46:33 | users/98-drop-finalizer | test step completed 98-drop-finalizer
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
destroy_operator]
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | + source ../../functions
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ realpath ../../..
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++++ pwd
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/tests/users
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++ test_name=users
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/vars.sh
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-920
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/deploy
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-920/e2e-tests/conf
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/users
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-920
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-920
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export VERSION=PR-920-3aaba39c
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ VERSION=PR-920-3aaba39c
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-920-3aaba39c
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++++ which gdate
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-920/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | ++++ which date
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ date=/usr/bin/date
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ oc get projects
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ :
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ kubectl get nodes
logger.go:42: 19:46:33 | users/99-remove-cluster-gracefully | +++ grep '^minikube'
logger.go:42: 19:46:34 | users/99-remove-cluster-gracefully | ++ oc get projects
logger.go:42: 19:46:34 | users/99-remove-cluster-gracefully | + destroy_operator
logger.go:42: 19:46:34 | users/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 19:46:34 | users/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:46:35 | users/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 19:46:35 | users/99-remove-cluster-gracefully | + [[ -n ps-operator ]]
logger.go:42: 19:46:35 | users/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 19:46:35 | users/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:46:36 | users/99-remove-cluster-gracefully | namespace "ps-operator" force deleted
logger.go:42: 19:46:42 | users/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
logger.go:42: 19:46:43 | users | users events from ns kuttl-test-genuine-newt:
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:38 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-genuine-newt/mysql-client to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-vs4x default-scheduler
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:39 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:39 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:39 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:49 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:49 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:49 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-genuine-newt/datadir-users-mysql-0" pd.csi.storage.gke.io_gke-da38c170538c49c99588-777b-53f1-vm_ee1b7ba4-894a-4ec9-9c08-aaeaf8e818b1
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:49 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-0 Pod users-mysql-0 in StatefulSet users-mysql success statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:49 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-0 in StatefulSet users-mysql successful statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:50 +0000 UTC Normal Pod users-orc-0 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-orc-0 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-brgj default-scheduler
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:50 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-0 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:51 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:51 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 290ms (290ms including waiting). Image size: 108787366 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:51 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:51 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:53 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-cc207baa-ea0f-47e5-aaf2-545b5eb68ca5 pd.csi.storage.gke.io_gke-da38c170538c49c99588-777b-53f1-vm_ee1b7ba4-894a-4ec9-9c08-aaeaf8e818b1
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:53 +0000 UTC Normal Pod users-mysql-0 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-mysql-0 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-vs4x default-scheduler
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:54 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 187ms (188ms including waiting). Image size: 72481192 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 284ms (284ms including waiting). Image size: 72481192 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:38:55 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:01 +0000 UTC Normal Pod users-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-cc207baa-ea0f-47e5-aaf2-545b5eb68ca5" attachdetach-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:02 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:02 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 280ms (280ms including waiting). Image size: 108787366 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:02 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:02 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:04 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:04 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 233ms (233ms including waiting). Image size: 436552375 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:04 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 258ms (258ms including waiting). Image size: 445859149 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 200ms (200ms including waiting). Image size: 132962143 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:05 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:26 +0000 UTC Normal Pod users-orc-1 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-orc-1 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-vs4x default-scheduler
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:26 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:26 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-1 in StatefulSet users-orc successful statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:27 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 264ms (264ms including waiting). Image size: 108787366 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:27 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:27 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:28 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:28 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 165ms (165ms including waiting). Image size: 72481192 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:28 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:28 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:28 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:29 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 208ms (208ms including waiting). Image size: 72481192 bytes. kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:29 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:29 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:37 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:37 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:37 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-genuine-newt/datadir-users-mysql-1" pd.csi.storage.gke.io_gke-da38c170538c49c99588-777b-53f1-vm_ee1b7ba4-894a-4ec9-9c08-aaeaf8e818b1
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:37 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-1 Pod users-mysql-1 in StatefulSet users-mysql success statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:37 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-1 in StatefulSet users-mysql successful statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:38 +0000 UTC Normal Pod users-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-haproxy-0 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-vs4x default-scheduler
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:38 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-0 in StatefulSet users-haproxy successful statefulset-controller
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:39 +0000 UTC Warning Pod users-haproxy-0 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:40 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet
logger.go:42: 19:46:43 | users | 2025-06-04 19:39:40 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image
"perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 227ms (227ms including waiting). Image size: 108787366 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:40 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:40 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:41 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-d16bdddc-304e-4c2e-a81e-62db16e3b263 pd.csi.storage.gke.io_gke-da38c170538c49c99588-777b-53f1-vm_ee1b7ba4-894a-4ec9-9c08-aaeaf8e818b1 logger.go:42: 19:46:43 | users | 2025-06-04 19:39:41 +0000 UTC Normal Pod users-mysql-1 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-mysql-1 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-brgj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:39:42 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:42 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 230ms (230ms including waiting). Image size: 102739705 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:42 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:42 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:43 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:43 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 163ms (163ms including waiting). Image size: 102739705 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:43 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:43 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:43 +0000 UTC Normal Pod users-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-haproxy-1 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-nxpj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:39:43 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-1 in StatefulSet users-haproxy successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:39:44 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:44 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 277ms (277ms including waiting). 
Image size: 108787366 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:44 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:44 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 214ms (214ms including waiting). Image size: 102739705 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 243ms (243ms including waiting). Image size: 102739705 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:46 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:47 +0000 UTC Normal Pod users-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-haproxy-2 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-brgj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:39:47 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:47 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-2 in StatefulSet users-haproxy successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:39:48 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 248ms (248ms including waiting). Image size: 108787366 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:48 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:48 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:49 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:49 +0000 UTC Normal Pod users-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d16bdddc-304e-4c2e-a81e-62db16e3b263" attachdetach-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 192ms (192ms including waiting). Image size: 102739705 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 208ms (208ms including waiting). Image size: 102739705 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:50 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:52 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:52 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 305ms (305ms including waiting). Image size: 108787366 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:52 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:52 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:53 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 234ms (234ms including waiting). Image size: 436552375 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 249ms (249ms including waiting). Image size: 445859149 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 242ms (242ms including waiting). Image size: 132962143 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:54 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:39:55 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:01 +0000 UTC Normal Pod users-orc-2 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-orc-2 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-nxpj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:40:01 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-2 in StatefulSet users-orc successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:40:02 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:02 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 275ms (275ms including waiting). Image size: 108787366 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:02 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:03 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:04 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:04 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 191ms (191ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:04 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:04 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:04 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:04 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 239ms (239ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:05 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:05 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:13 +0000 UTC Warning Pod users-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/04 19:40:12 Waiting for MySQL ready state 2025/06/04 19:40:12 MySQL is ready 2025/06/04 19:40:12 Peers: [3233393330363862.users-mysql-unready.kuttl-test-genuine-newt 6237386162376636.users-mysql-unready.kuttl-test-genuine-newt] 2025/06/04 19:40:12 FQDN: users-mysql-1.users-mysql.kuttl-test-genuine-newt 2025/06/04 19:40:12 Primary: users-mysql-0.users-mysql.kuttl-test-genuine-newt Replicas: [users-mysql-1.users-mysql.kuttl-test-genuine-newt] 2025/06/04 19:40:12 lookup users-mysql-1 [10.175.226.26] 2025/06/04 19:40:12 PodIP: 10.175.226.26 2025/06/04 19:40:12 lookup users-mysql-0.users-mysql.kuttl-test-genuine-newt [10.175.225.39] 2025/06/04 19:40:12 PrimaryIP: 10.175.225.39 2025/06/04 19:40:12 Donor: users-mysql-0.users-mysql.kuttl-test-genuine-newt 2025/06/04 19:40:12 Opening connection to 10.175.226.26 2025/06/04 19:40:12 Clone required: true 2025/06/04 19:40:12 Checking if a clone in progress 2025/06/04 19:40:12 Clone in progress: false 2025/06/04 19:40:12 Cloning from users-mysql-0.users-mysql.kuttl-test-genuine-newt 2025/06/04 19:40:13 Clone finished. Restarting container... 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:13 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:17 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 245ms (245ms including waiting). Image size: 436552375 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:40:47 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:40:47 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:40:47 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-genuine-newt/datadir-users-mysql-2" pd.csi.storage.gke.io_gke-da38c170538c49c99588-777b-53f1-vm_ee1b7ba4-894a-4ec9-9c08-aaeaf8e818b1 logger.go:42: 19:46:43 | users | 2025-06-04 19:40:47 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-2 Pod users-mysql-2 in StatefulSet users-mysql success statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:40:47 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-2 in StatefulSet users-mysql successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:40:51 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-8e1fd884-2b8b-472b-acef-44853dbb3771 pd.csi.storage.gke.io_gke-da38c170538c49c99588-777b-53f1-vm_ee1b7ba4-894a-4ec9-9c08-aaeaf8e818b1 logger.go:42: 19:46:43 | users | 2025-06-04 19:40:51 +0000 UTC Normal Pod users-mysql-2 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-mysql-2 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-nxpj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:40:59 +0000 UTC Normal Pod users-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-8e1fd884-2b8b-472b-acef-44853dbb3771" attachdetach-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:41:00 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:01 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 241ms (241ms including waiting). Image size: 108787366 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:01 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:01 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:03 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:03 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 269ms (269ms including waiting). Image size: 436552375 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:03 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:03 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:03 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 178ms (178ms including waiting). Image size: 445859149 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 246ms (246ms including waiting). Image size: 132962143 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:04 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:21 +0000 UTC Warning Pod users-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/04 19:41:20 Waiting for MySQL ready state 2025/06/04 19:41:20 MySQL is ready 2025/06/04 19:41:20 Peers: [3233393330363862.users-mysql-unready.kuttl-test-genuine-newt 6237386162376636.users-mysql-unready.kuttl-test-genuine-newt 6366613864383134.users-mysql-unready.kuttl-test-genuine-newt] 2025/06/04 19:41:20 FQDN: users-mysql-2.users-mysql.kuttl-test-genuine-newt 2025/06/04 19:41:20 Primary: users-mysql-0.users-mysql.kuttl-test-genuine-newt Replicas: [users-mysql-1.users-mysql.kuttl-test-genuine-newt users-mysql-2.users-mysql.kuttl-test-genuine-newt] 2025/06/04 19:41:20 lookup users-mysql-2 [10.175.224.34] 2025/06/04 19:41:20 PodIP: 10.175.224.34 2025/06/04 19:41:20 lookup users-mysql-0.users-mysql.kuttl-test-genuine-newt [10.175.225.39] 2025/06/04 19:41:20 PrimaryIP: 10.175.225.39 2025/06/04 19:41:20 Donor: users-mysql-1.users-mysql.kuttl-test-genuine-newt 2025/06/04 19:41:20 Opening connection to 10.175.224.34 2025/06/04 19:41:20 Clone required: true 2025/06/04 19:41:20 Checking if a clone in progress 2025/06/04 19:41:20 Clone in progress: false 2025/06/04 19:41:20 Cloning from users-mysql-1.users-mysql.kuttl-test-genuine-newt 2025/06/04 19:41:21 Clone finished. Restarting container... kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:21 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:41:25 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 371ms (371ms including waiting). Image size: 436552375 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:23 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:23 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:23 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-2 in StatefulSet users-orc successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:42:54 +0000 UTC Normal Pod users-orc-2 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-orc-2 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-nxpj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:42:54 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:54 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 252ms (252ms including waiting). Image size: 108787366 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:54 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:54 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:56 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 244ms (244ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 231ms (231ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:42:57 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:43:29 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:43:29 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:43:29 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-1 in StatefulSet users-orc successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:44:00 +0000 UTC Normal Pod users-orc-1 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-orc-1 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-vs4x default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:44:01 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:01 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 201ms (201ms including waiting). Image size: 108787366 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:01 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:01 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 187ms (187ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 213ms (213ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:03 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:36 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:36 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:44:36 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-0 in StatefulSet users-orc successful statefulset-controller logger.go:42: 19:46:43 | users | 2025-06-04 19:45:07 +0000 UTC Normal Pod users-orc-0 Binding Scheduled Successfully assigned kuttl-test-genuine-newt/users-orc-0 to gke-jen-ps-920-3aaba39c--default-pool-1b7df7c9-brgj default-scheduler logger.go:42: 19:46:43 | users | 2025-06-04 19:45:07 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:07 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-920-3aaba39c" in 255ms (255ms including waiting). Image size: 108787366 bytes. 
kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:07 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:07 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:09 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 225ms (225ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 187ms (187ms including waiting). Image size: 72481192 bytes. kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:45:10 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC 
Normal Pod users-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:34 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:35 +0000 UTC Warning Pod users-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/04 19:46:35 MySQL state is not ready... kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:37 +0000 UTC Warning Pod users-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/04 19:46:37 MySQL state is not ready... kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:37 +0000 UTC Warning Pod users-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/04 19:46:37 MySQL state is not ready... kubelet logger.go:42: 19:46:43 | users | 2025-06-04 19:46:42 +0000 UTC Warning Pod users-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/04 19:46:42 MySQL state is not ready... kubelet logger.go:42: 19:46:43 | users | Deleting namespace: kuttl-test-genuine-newt === NAME kuttl harness.go:403: run tests finished harness.go:510: cleaning up harness.go:567: removing temp folder: "" --- PASS: kuttl (545.09s) --- PASS: kuttl/harness (0.00s) --- PASS: kuttl/harness/users (544.43s) PASS