=== RUN kuttl harness.go:462: starting setup harness.go:252: running tests using configured kubeconfig. harness.go:275: Successful connection to cluster at: https://35.184.75.41 harness.go:360: running tests harness.go:73: going to run test suite with timeout of 180 seconds for each step harness.go:372: testsuite: e2e-tests/tests has 28 tests === RUN kuttl/harness === RUN kuttl/harness/users === PAUSE kuttl/harness/users === CONT kuttl/harness/users logger.go:42: 15:06:49 | users | Creating namespace: kuttl-test-worthy-amoeba logger.go:42: 15:06:49 | users/0-deploy-operator | starting test step 0-deploy-operator logger.go:42: 15:06:49 | users/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions deploy_operator deploy_non_tls_cluster_secrets deploy_tls_cluster_secrets deploy_client] logger.go:42: 15:06:49 | users/0-deploy-operator | + source ../../functions logger.go:42: 15:06:49 | users/0-deploy-operator | +++ realpath ../../.. logger.go:42: 15:06:49 | users/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:06:49 | users/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:06:49 | users/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:06:49 | users/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:06:49 | users/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:06:49 | users/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:06:49 | users/0-deploy-operator | ++++ mktemp -d logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export TEMP_DIR=/tmp/tmp.WshbnHeIMY logger.go:42: 15:06:49 | users/0-deploy-operator | +++ TEMP_DIR=/tmp/tmp.WshbnHeIMY logger.go:42: 15:06:49 | users/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:06:49 | users/0-deploy-operator | +++ GIT_BRANCH=PR-424 logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export VERSION=PR-424-70568ae logger.go:42: 15:06:49 | users/0-deploy-operator | +++ VERSION=PR-424-70568ae logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:06:49 | users/0-deploy-operator | +++ 
IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:06:49 | users/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:06:49 | users/0-deploy-operator | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:06:49 | users/0-deploy-operator | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:06:49 | users/0-deploy-operator | ++++ which gdate logger.go:42: 15:06:49 | users/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:06:49 | users/0-deploy-operator | ++++ which date logger.go:42: 15:06:49 | users/0-deploy-operator | +++ date=/usr/bin/date logger.go:42: 15:06:49 | users/0-deploy-operator | +++ command -v oc logger.go:42: 15:06:49 | users/0-deploy-operator | +++ oc get projects logger.go:42: 15:06:54 | users/0-deploy-operator | error: the server doesn't have a resource type "projects" logger.go:42: 15:06:54 | users/0-deploy-operator | +++ grep '^minikube' logger.go:42: 15:06:54 | users/0-deploy-operator | +++ kubectl get nodes logger.go:42: 15:06:55 | users/0-deploy-operator | ++++ pwd logger.go:42: 15:06:55 | users/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/users logger.go:42: 15:06:55 | users/0-deploy-operator | ++ test_name=users logger.go:42: 15:06:55 | users/0-deploy-operator | + deploy_operator logger.go:42: 15:06:55 | users/0-deploy-operator | + kubectl -n kuttl-test-worthy-amoeba apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/crd.yaml logger.go:42: 15:06:56 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied logger.go:42: 15:06:57 | users/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied logger.go:42: 15:06:58 | users/0-deploy-operator 
| customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied logger.go:42: 15:06:58 | users/0-deploy-operator | + kubectl -n kuttl-test-worthy-amoeba apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/rbac.yaml logger.go:42: 15:06:59 | users/0-deploy-operator | serviceaccount/percona-server-mysql-operator created logger.go:42: 15:06:59 | users/0-deploy-operator | serviceaccount/percona-server-mysql-operator-orchestrator created logger.go:42: 15:07:00 | users/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 15:07:00 | users/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator created logger.go:42: 15:07:00 | users/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created logger.go:42: 15:07:01 | users/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 15:07:01 | users/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created logger.go:42: 15:07:01 | users/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created logger.go:42: 15:07:01 | users/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 15:07:01 | users/0-deploy-operator | + kubectl -n kuttl-test-worthy-amoeba apply -f - logger.go:42: 15:07:01 | users/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:07:01 | users/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-424-70568ae"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/operator.yaml logger.go:42: 15:07:03 | users/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 15:07:03 | users/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 15:07:03 | users/0-deploy-operator | + deploy_non_tls_cluster_secrets logger.go:42: 15:07:03 | users/0-deploy-operator | + kubectl -n kuttl-test-worthy-amoeba apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/secrets.yaml logger.go:42: 15:07:04 | users/0-deploy-operator | secret/test-secrets created logger.go:42: 15:07:04 | users/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 15:07:04 | users/0-deploy-operator | + kubectl -n kuttl-test-worthy-amoeba apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/ssl-secret.yaml logger.go:42: 15:07:06 | users/0-deploy-operator | secret/test-ssl created logger.go:42: 15:07:06 | users/0-deploy-operator | + deploy_client logger.go:42: 15:07:06 | users/0-deploy-operator | + kubectl -n kuttl-test-worthy-amoeba apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/client.yaml logger.go:42: 15:07:07 | users/0-deploy-operator | pod/mysql-client created logger.go:42: 15:07:15 | users/0-deploy-operator | test step completed 0-deploy-operator logger.go:42: 15:07:15 | users/1-create-cluster | starting test step 1-create-cluster logger.go:42: 15:07:15 | users/1-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval 
'.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=3' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 15:07:15 | users/1-create-cluster | + source ../../functions logger.go:42: 15:07:15 | users/1-create-cluster | +++ realpath ../../.. logger.go:42: 15:07:15 | users/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:07:15 | users/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:07:15 | users/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:07:15 | users/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:07:15 | users/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:07:15 | users/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:07:15 | users/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:07:15 | users/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:07:15 | users/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:07:15 | users/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:07:15 | users/1-create-cluster | ++++ mktemp -d logger.go:42: 15:07:15 | users/1-create-cluster | +++ export TEMP_DIR=/tmp/tmp.aWOpItWvI5 logger.go:42: 15:07:15 | users/1-create-cluster | +++ TEMP_DIR=/tmp/tmp.aWOpItWvI5 logger.go:42: 15:07:15 | users/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:07:15 | users/1-create-cluster | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:07:15 | users/1-create-cluster | +++ GIT_BRANCH=PR-424 logger.go:42: 15:07:15 | users/1-create-cluster | +++ export VERSION=PR-424-70568ae logger.go:42: 15:07:15 | users/1-create-cluster | +++ VERSION=PR-424-70568ae logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:07:15 | users/1-create-cluster | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:07:15 | users/1-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:07:15 | users/1-create-cluster | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:07:15 | users/1-create-cluster | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:07:15 | users/1-create-cluster | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:07:15 | users/1-create-cluster | ++++ which gdate logger.go:42: 15:07:15 | users/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:07:15 | users/1-create-cluster | ++++ which date logger.go:42: 15:07:15 | users/1-create-cluster | +++ date=/usr/bin/date logger.go:42: 15:07:15 | users/1-create-cluster | +++ command -v oc logger.go:42: 15:07:15 | users/1-create-cluster | +++ oc get projects logger.go:42: 15:07:20 | users/1-create-cluster | error: the server doesn't have a resource type "projects" logger.go:42: 15:07:20 | users/1-create-cluster | +++ kubectl get nodes logger.go:42: 15:07:20 | users/1-create-cluster | +++ grep '^minikube' logger.go:42: 15:07:21 | users/1-create-cluster | ++++ pwd logger.go:42: 15:07:21 | users/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/users logger.go:42: 15:07:21 | users/1-create-cluster | ++ test_name=users logger.go:42: 15:07:21 | users/1-create-cluster | + get_cr logger.go:42: 15:07:21 | users/1-create-cluster | + local name_suffix= logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 15:07:21 | users/1-create-cluster | + kubectl -n kuttl-test-worthy-amoeba apply -f - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.metadata.name="%s"' users logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.metadata.name="users"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/cr.yaml logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 15:07:21 | users/1-create-cluster | + '[' -n '' ']' logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 15:07:21 | 
users/1-create-cluster | + yq eval .spec.orchestrator.size=3 - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-424-70568ae"' - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 15:07:21 | users/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:07:21 | users/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 15:07:22 | users/1-create-cluster | perconaservermysql.ps.percona.com/users created logger.go:42: 15:10:33 | users/1-create-cluster | test step completed 1-create-cluster logger.go:42: 15:10:33 | users/2-check-users | starting test step 2-check-users logger.go:42: 15:10:33 | users/2-check-users | running command: [sh -c set -o errexit set -o xtrace source ../../functions mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" users=($(get_mysql_users "${mysql_args}")) args='' for user in "${users[@]}"; do host="%" case $user in heartbeat | xtrabackup) host="localhost" ;; esac query="SHOW GRANTS FOR '${user}'@'${host}';" run_mysql "${query}" "${mysql_args}" \ | sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//" \ >"${TEMP_DIR}/${user}.sql" args="${args} --from-file=${user}=${TEMP_DIR}/${user}.sql" done kubectl create configmap -n "${NAMESPACE}" 02-check-users $args kubectl get configmap -n "${NAMESPACE}" 
02-check-users -o yaml] logger.go:42: 15:10:33 | users/2-check-users | + source ../../functions logger.go:42: 15:10:33 | users/2-check-users | +++ realpath ../../.. logger.go:42: 15:10:33 | users/2-check-users | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:10:33 | users/2-check-users | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:10:33 | users/2-check-users | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:10:33 | users/2-check-users | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:10:33 | users/2-check-users | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:10:33 | users/2-check-users | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:10:33 | users/2-check-users | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:10:33 | users/2-check-users | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:10:33 | users/2-check-users | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:10:33 | users/2-check-users | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:10:33 | users/2-check-users | ++++ mktemp -d logger.go:42: 15:10:33 | users/2-check-users | +++ export TEMP_DIR=/tmp/tmp.kJ316hhQMv logger.go:42: 15:10:33 | users/2-check-users | +++ TEMP_DIR=/tmp/tmp.kJ316hhQMv logger.go:42: 15:10:33 | users/2-check-users | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:10:33 | users/2-check-users | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:10:33 | users/2-check-users | +++ GIT_BRANCH=PR-424 logger.go:42: 15:10:33 | users/2-check-users | +++ export VERSION=PR-424-70568ae logger.go:42: 15:10:33 | users/2-check-users | +++ VERSION=PR-424-70568ae logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:10:33 | users/2-check-users | +++ export 
IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:10:33 | users/2-check-users | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:10:33 | users/2-check-users | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:10:33 | users/2-check-users | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:10:33 | users/2-check-users | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:10:33 | users/2-check-users | ++++ which gdate logger.go:42: 15:10:33 | users/2-check-users | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:10:33 | users/2-check-users | ++++ which date logger.go:42: 15:10:33 | users/2-check-users | +++ date=/usr/bin/date logger.go:42: 15:10:33 | users/2-check-users | +++ command -v oc logger.go:42: 15:10:33 | users/2-check-users | +++ oc get projects logger.go:42: 15:10:38 | users/2-check-users | error: the server doesn't have a resource type "projects" logger.go:42: 15:10:38 | users/2-check-users | +++ grep '^minikube' logger.go:42: 15:10:38 | users/2-check-users | +++ kubectl get nodes logger.go:42: 15:10:39 | users/2-check-users | ++++ pwd logger.go:42: 15:10:39 | users/2-check-users | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/users logger.go:42: 15:10:39 | users/2-check-users | ++ test_name=users logger.go:42: 15:10:39 | users/2-check-users | +++ get_cluster_name logger.go:42: 15:10:39 | users/2-check-users | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:10:40 | users/2-check-users | ++ get_haproxy_svc users logger.go:42: 15:10:40 | users/2-check-users | ++ local cluster=users logger.go:42: 15:10:40 | users/2-check-users | ++ echo users-haproxy logger.go:42: 15:10:40 | users/2-check-users | + mysql_args='-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:40 | users/2-check-users | + users=($(get_mysql_users "${mysql_args}")) logger.go:42: 15:10:40 | users/2-check-users | ++ get_mysql_users '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:40 | users/2-check-users | ++ local 'args=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:40 | users/2-check-users | ++ run_mysql 'SELECT user FROM mysql.user' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:40 | users/2-check-users | ++ grep -vE 'mysql|root' logger.go:42: 15:10:40 | users/2-check-users | ++ local 'command=SELECT user FROM mysql.user' logger.go:42: 15:10:40 | users/2-check-users | ++ local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:40 | users/2-check-users | ++ local pod= logger.go:42: 15:10:40 | users/2-check-users | +++ get_client_pod logger.go:42: 15:10:40 | users/2-check-users | +++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:40 | users/2-check-users | ++ client_pod=mysql-client logger.go:42: 15:10:40 | 
users/2-check-users | ++ wait_pod mysql-client logger.go:42: 15:10:40 | users/2-check-users | ++ local pod=mysql-client logger.go:42: 15:10:40 | users/2-check-users | ++ set +o xtrace logger.go:42: 15:10:41 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:41 | users/2-check-users | ++ sed -e 's/mysql: //' logger.go:42: 15:10:41 | users/2-check-users | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:10:41 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT user FROM mysql.user" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:43 | users/2-check-users | + args= logger.go:42: 15:10:43 | users/2-check-users | + for user in '"${users[@]}"' logger.go:42: 15:10:43 | users/2-check-users | + host=% logger.go:42: 15:10:43 | users/2-check-users | + case $user in logger.go:42: 15:10:43 | users/2-check-users | + query='SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';' logger.go:42: 15:10:43 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:43 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';' logger.go:42: 15:10:43 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:43 | users/2-check-users | + local pod= logger.go:42: 15:10:43 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//' logger.go:42: 15:10:43 | users/2-check-users | ++ get_client_pod logger.go:42: 15:10:43 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:43 | users/2-check-users | + client_pod=mysql-client logger.go:42: 15:10:43 | users/2-check-users | + wait_pod mysql-client logger.go:42: 15:10:43 | users/2-check-users | + local pod=mysql-client logger.go:42: 15:10:43 | users/2-check-users | + set +o xtrace logger.go:42: 15:10:44 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:44 | users/2-check-users | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''monitor'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:44 | users/2-check-users | + sed -e 's/mysql: //' logger.go:42: 15:10:44 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.' 
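Each run_mysql call traced in this step boils down to a kubectl exec into the mysql-client pod, with the client's password warning filtered out of the result. A minimal stand-alone sketch of the monitor query above, assuming the namespace and service names from this run (run_mysql and wait_pod themselves live in e2e-tests/functions and are not reproduced here):

# Stand-alone equivalent of the traced run_mysql call for the monitor user.
namespace=kuttl-test-worthy-amoeba      # kuttl-generated namespace for this run
query="SHOW GRANTS FOR 'monitor'@'%';"
uri='-h users-haproxy -uroot -proot_password'
kubectl -n "${namespace}" exec mysql-client -- \
    bash -c "printf '%s\n' \"${query}\" | mysql -sN ${uri}" \
  | sed -e 's/mysql: //' \
  | grep -v 'Using a password on the command line interface can be insecure.'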
logger.go:42: 15:10:45 | users/2-check-users | + args=' --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql' logger.go:42: 15:10:45 | users/2-check-users | + for user in '"${users[@]}"' logger.go:42: 15:10:45 | users/2-check-users | + host=% logger.go:42: 15:10:45 | users/2-check-users | + case $user in logger.go:42: 15:10:45 | users/2-check-users | + query='SHOW GRANTS FOR '\''operator'\''@'\''%'\'';' logger.go:42: 15:10:45 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''operator'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:45 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''operator'\''@'\''%'\'';' logger.go:42: 15:10:45 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:45 | users/2-check-users | + local pod= logger.go:42: 15:10:45 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//' logger.go:42: 15:10:45 | users/2-check-users | ++ get_client_pod logger.go:42: 15:10:45 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:46 | users/2-check-users | + client_pod=mysql-client logger.go:42: 15:10:46 | users/2-check-users | + wait_pod mysql-client logger.go:42: 15:10:46 | users/2-check-users | + local pod=mysql-client logger.go:42: 15:10:46 | users/2-check-users | + set +o xtrace logger.go:42: 15:10:47 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:47 | users/2-check-users | + sed -e 's/mysql: //' logger.go:42: 15:10:47 | users/2-check-users | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''operator'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:47 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:10:48 | users/2-check-users | + args=' --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql --from-file=operator=/tmp/tmp.kJ316hhQMv/operator.sql' logger.go:42: 15:10:48 | users/2-check-users | + for user in '"${users[@]}"' logger.go:42: 15:10:48 | users/2-check-users | + host=% logger.go:42: 15:10:48 | users/2-check-users | + case $user in logger.go:42: 15:10:48 | users/2-check-users | + query='SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';' logger.go:42: 15:10:48 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:48 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';' logger.go:42: 15:10:48 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:48 | users/2-check-users | + local pod= logger.go:42: 15:10:48 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//' logger.go:42: 15:10:48 | users/2-check-users | ++ get_client_pod logger.go:42: 15:10:48 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:49 | users/2-check-users | + client_pod=mysql-client logger.go:42: 15:10:49 | users/2-check-users | + wait_pod mysql-client logger.go:42: 15:10:49 | users/2-check-users | + local pod=mysql-client logger.go:42: 15:10:49 | users/2-check-users | + set +o xtrace logger.go:42: 15:10:50 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:50 | users/2-check-users | + sed -e 's/mysql: //' logger.go:42: 15:10:50 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:10:50 | users/2-check-users | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''orchestrator'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:51 | users/2-check-users | + args=' --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql --from-file=operator=/tmp/tmp.kJ316hhQMv/operator.sql --from-file=orchestrator=/tmp/tmp.kJ316hhQMv/orchestrator.sql' logger.go:42: 15:10:51 | users/2-check-users | + for user in '"${users[@]}"' logger.go:42: 15:10:51 | users/2-check-users | + host=% logger.go:42: 15:10:51 | users/2-check-users | + case $user in logger.go:42: 15:10:51 | users/2-check-users | + query='SHOW GRANTS FOR '\''replication'\''@'\''%'\'';' logger.go:42: 15:10:51 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''replication'\''@'\''%'\'';' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:51 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''replication'\''@'\''%'\'';' logger.go:42: 15:10:51 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//' logger.go:42: 15:10:51 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:51 | users/2-check-users | + local pod= logger.go:42: 15:10:51 | users/2-check-users | ++ get_client_pod logger.go:42: 15:10:51 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:52 | users/2-check-users | + client_pod=mysql-client logger.go:42: 15:10:52 | users/2-check-users | + wait_pod mysql-client logger.go:42: 15:10:52 | users/2-check-users | + local pod=mysql-client logger.go:42: 15:10:52 | users/2-check-users | + set +o xtrace logger.go:42: 15:10:53 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:53 | users/2-check-users | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''replication'\''@'\''%'\'';" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:53 | users/2-check-users | + sed -e 's/mysql: //' logger.go:42: 15:10:53 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.' 
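The sed -E expression applied to every SHOW GRANTS result strips single-quoted host values that look like pod IPs (10.x / 192.x) or *.internal hostnames, presumably so the collected grant files stay identical between runs. A hypothetical grant line, only to illustrate what the regex (copied from the trace) removes:

# Hypothetical input; the sed expression is the one used in this step.
echo "GRANT SELECT ON *.* TO 'monitor'@'10.124.0.5'" \
  | sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//"
# prints: GRANT SELECT ON *.* TO 'monitor'@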
logger.go:42: 15:10:54 | users/2-check-users | + args=' --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql --from-file=operator=/tmp/tmp.kJ316hhQMv/operator.sql --from-file=orchestrator=/tmp/tmp.kJ316hhQMv/orchestrator.sql --from-file=replication=/tmp/tmp.kJ316hhQMv/replication.sql' logger.go:42: 15:10:54 | users/2-check-users | + for user in '"${users[@]}"' logger.go:42: 15:10:54 | users/2-check-users | + host=% logger.go:42: 15:10:54 | users/2-check-users | + case $user in logger.go:42: 15:10:54 | users/2-check-users | + host=localhost logger.go:42: 15:10:54 | users/2-check-users | + query='SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';' logger.go:42: 15:10:54 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//' logger.go:42: 15:10:54 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:54 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';' logger.go:42: 15:10:54 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:54 | users/2-check-users | + local pod= logger.go:42: 15:10:54 | users/2-check-users | ++ get_client_pod logger.go:42: 15:10:54 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:55 | users/2-check-users | + client_pod=mysql-client logger.go:42: 15:10:55 | users/2-check-users | + wait_pod mysql-client logger.go:42: 15:10:55 | users/2-check-users | + local pod=mysql-client logger.go:42: 15:10:55 | users/2-check-users | + set +o xtrace logger.go:42: 15:10:56 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:56 | users/2-check-users | + sed -e 's/mysql: //' logger.go:42: 15:10:56 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:10:56 | users/2-check-users | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''heartbeat'\''@'\''localhost'\'';" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:57 | users/2-check-users | + args=' --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql --from-file=operator=/tmp/tmp.kJ316hhQMv/operator.sql --from-file=orchestrator=/tmp/tmp.kJ316hhQMv/orchestrator.sql --from-file=replication=/tmp/tmp.kJ316hhQMv/replication.sql --from-file=heartbeat=/tmp/tmp.kJ316hhQMv/heartbeat.sql' logger.go:42: 15:10:57 | users/2-check-users | + for user in '"${users[@]}"' logger.go:42: 15:10:57 | users/2-check-users | + host=% logger.go:42: 15:10:57 | users/2-check-users | + case $user in logger.go:42: 15:10:57 | users/2-check-users | + host=localhost logger.go:42: 15:10:57 | users/2-check-users | + query='SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';' logger.go:42: 15:10:57 | users/2-check-users | + sed -E 's/'\''(10|192)[.][0-9][^'\'']*'\''//; s/'\''[^'\'']*[.]internal'\''//' logger.go:42: 15:10:57 | users/2-check-users | + run_mysql 'SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';' '-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:57 | users/2-check-users | + local 'command=SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';' logger.go:42: 15:10:57 | users/2-check-users | + local 'uri=-h users-haproxy -uroot -proot_password' logger.go:42: 15:10:57 | users/2-check-users | + local pod= logger.go:42: 15:10:57 | users/2-check-users | ++ get_client_pod logger.go:42: 15:10:57 | users/2-check-users | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:10:58 | users/2-check-users | + client_pod=mysql-client logger.go:42: 15:10:58 | users/2-check-users | + wait_pod mysql-client logger.go:42: 15:10:58 | users/2-check-users | + local pod=mysql-client logger.go:42: 15:10:58 | users/2-check-users | + set +o xtrace logger.go:42: 15:10:59 | users/2-check-users | mysql-clienttrue logger.go:42: 15:10:59 | users/2-check-users | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW GRANTS FOR '\''xtrabackup'\''@'\''localhost'\'';" | mysql -sN -h users-haproxy -uroot -proot_password' logger.go:42: 15:10:59 | users/2-check-users | + sed -e 's/mysql: //' logger.go:42: 15:10:59 | users/2-check-users | + grep -v 'Using a password on the command line interface can be insecure.' 
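The six users iterated over here are not hard-coded: get_mysql_users, traced at the start of this step, lists every account in mysql.user except root and the built-in mysql.* accounts. Condensed from that trace, with the result observed in this run:

# How the user list for this loop is produced (condensed from the trace above;
# run_mysql comes from e2e-tests/functions).
mysql_args='-h users-haproxy -uroot -proot_password'
users=($(run_mysql 'SELECT user FROM mysql.user' "${mysql_args}" | grep -vE 'mysql|root'))
# this run: monitor operator orchestrator replication heartbeat xtrabackup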
logger.go:42: 15:11:00 | users/2-check-users | + args=' --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql --from-file=operator=/tmp/tmp.kJ316hhQMv/operator.sql --from-file=orchestrator=/tmp/tmp.kJ316hhQMv/orchestrator.sql --from-file=replication=/tmp/tmp.kJ316hhQMv/replication.sql --from-file=heartbeat=/tmp/tmp.kJ316hhQMv/heartbeat.sql --from-file=xtrabackup=/tmp/tmp.kJ316hhQMv/xtrabackup.sql' logger.go:42: 15:11:00 | users/2-check-users | + kubectl create configmap -n kuttl-test-worthy-amoeba 02-check-users --from-file=monitor=/tmp/tmp.kJ316hhQMv/monitor.sql --from-file=operator=/tmp/tmp.kJ316hhQMv/operator.sql --from-file=orchestrator=/tmp/tmp.kJ316hhQMv/orchestrator.sql --from-file=replication=/tmp/tmp.kJ316hhQMv/replication.sql --from-file=heartbeat=/tmp/tmp.kJ316hhQMv/heartbeat.sql --from-file=xtrabackup=/tmp/tmp.kJ316hhQMv/xtrabackup.sql logger.go:42: 15:11:01 | users/2-check-users | configmap/02-check-users created logger.go:42: 15:11:01 | users/2-check-users | + kubectl get configmap -n kuttl-test-worthy-amoeba 02-check-users -o yaml logger.go:42: 15:11:02 | users/2-check-users | apiVersion: v1 logger.go:42: 15:11:02 | users/2-check-users | data: logger.go:42: 15:11:02 | users/2-check-users | heartbeat: | logger.go:42: 15:11:02 | users/2-check-users | GRANT REPLICATION CLIENT ON *.* TO `heartbeat`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | GRANT SYSTEM_USER ON *.* TO `heartbeat`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ON `sys_operator`.`heartbeat` TO `heartbeat`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | monitor: | logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT, RELOAD, PROCESS, SUPER, REPLICATION CLIENT ON *.* TO `monitor`@`%` logger.go:42: 15:11:02 | users/2-check-users | GRANT BACKUP_ADMIN,SERVICE_CONNECTION_ADMIN,SYSTEM_USER ON *.* TO `monitor`@`%` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `performance_schema`.* TO `monitor`@`%` logger.go:42: 15:11:02 | users/2-check-users | operator: | logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `operator`@`%` WITH GRANT OPTION logger.go:42: 15:11:02 | users/2-check-users | GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ABORT_EXEMPT,AUDIT_ADMIN,AUTHENTICATION_POLICY_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,ENCRYPTION_KEY_ADMIN,FIREWALL_EXEMPT,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PASSWORDLESS_USER_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SENSITIVE_VARIABLES_OBSERVER,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,TELEMETRY_LOG_ADMIN,XA_RECOVER_ADMIN ON *.* TO `operator`@`%` WITH GRANT OPTION logger.go:42: 15:11:02 | users/2-check-users | orchestrator: | logger.go:42: 15:11:02 | users/2-check-users | GRANT RELOAD, PROCESS, SUPER, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO `orchestrator`@`%` logger.go:42: 15:11:02 | 
users/2-check-users | GRANT SYSTEM_USER ON *.* TO `orchestrator`@`%` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `sys_operator`.* TO `orchestrator`@`%` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `mysql`.`slave_master_info` TO `orchestrator`@`%` logger.go:42: 15:11:02 | users/2-check-users | replication: | logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT, RELOAD, SHUTDOWN, PROCESS, FILE, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE USER ON *.* TO `replication`@`%` WITH GRANT OPTION logger.go:42: 15:11:02 | users/2-check-users | GRANT BACKUP_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,ROLE_ADMIN,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN ON *.* TO `replication`@`%` WITH GRANT OPTION logger.go:42: 15:11:02 | users/2-check-users | GRANT INSERT, UPDATE, DELETE ON `mysql`.* TO `replication`@`%` WITH GRANT OPTION logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `performance_schema`.`threads` TO `replication`@`%` logger.go:42: 15:11:02 | users/2-check-users | xtrabackup: | logger.go:42: 15:11:02 | users/2-check-users | GRANT RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT ON *.* TO `xtrabackup`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | GRANT BACKUP_ADMIN,GROUP_REPLICATION_ADMIN,REPLICATION_SLAVE_ADMIN,SYSTEM_USER ON *.* TO `xtrabackup`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `performance_schema`.`keyring_component_status` TO `xtrabackup`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `performance_schema`.`log_status` TO `xtrabackup`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | GRANT SELECT ON `performance_schema`.`replication_group_members` TO `xtrabackup`@`localhost` logger.go:42: 15:11:02 | users/2-check-users | kind: ConfigMap logger.go:42: 15:11:02 | users/2-check-users | metadata: logger.go:42: 15:11:02 | users/2-check-users | creationTimestamp: "2023-08-17T15:11:01Z" logger.go:42: 15:11:02 | users/2-check-users | name: 02-check-users logger.go:42: 15:11:02 | users/2-check-users | namespace: kuttl-test-worthy-amoeba logger.go:42: 15:11:02 | users/2-check-users | resourceVersion: "38658" logger.go:42: 15:11:02 | users/2-check-users | uid: 78e83a92-2f1d-4250-8690-b848292fb1d4 logger.go:42: 15:11:03 | users/2-check-users | test step completed 2-check-users logger.go:42: 15:11:03 | users/3-update-passwords | starting test step 3-update-passwords logger.go:42: 15:11:04 | users/3-update-passwords | Secret:kuttl-test-worthy-amoeba/test-secrets updated logger.go:42: 15:11:12 | users/3-update-passwords | test step completed 3-update-passwords logger.go:42: 15:11:12 | users/4-check-cluster | starting test step 4-check-cluster logger.go:42: 15:11:12 | users/4-check-cluster | running command: [sh -c set -o pipefail set -o errexit set -o xtrace source ../../functions sleep 30 # wait for cluster status to change to initializing wait_cluster_consistency_async "${test_name}" "3" "3" mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password_updated" users=($(get_mysql_users "${mysql_args}")) # check connection args="" set +o errexit for user in "${users[@]}"; do mysql_args="-h $(get_haproxy_svc $(get_cluster_name)) -u${user} -p${user}_password_updated" pod=mysql-client case $user in heartbeat | xtrabackup) mysql_args="-h localhost -u${user} -p${user}_password_updated" pod="$(get_cluster_name)-mysql-0" ;; esac run_mysql 
"SELECT 1" "${mysql_args}" "${pod}" args="${args} --from-literal=${user}=$([ $? -eq 0 ] && echo 'success' || echo 'fail')" done set -o errexit kubectl create configmap -n "${NAMESPACE}" 04-check-connections $args kubectl get configmap -n "${NAMESPACE}" 04-check-connections -o yaml # check replication wait_cluster_consistency_async "${test_name}" "3" "3" orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0) cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0]) replicating=$(run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" \ | tee \ | jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' \ | grep "true" \ | wc -l \ | sed 's/ *//') kubectl create configmap -n "${NAMESPACE}" 04-check-replication --from-literal=replicating="${replicating}"] logger.go:42: 15:11:12 | users/4-check-cluster | + source ../../functions logger.go:42: 15:11:12 | users/4-check-cluster | +++ realpath ../../.. logger.go:42: 15:11:12 | users/4-check-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:11:12 | users/4-check-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:11:12 | users/4-check-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:11:12 | users/4-check-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:11:12 | users/4-check-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:11:12 | users/4-check-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:11:12 | users/4-check-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:11:12 | users/4-check-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:11:12 | users/4-check-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:11:12 | users/4-check-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:11:12 | users/4-check-cluster | ++++ mktemp -d logger.go:42: 15:11:12 | users/4-check-cluster | +++ export TEMP_DIR=/tmp/tmp.9fxIRywcfA logger.go:42: 15:11:12 | users/4-check-cluster | +++ TEMP_DIR=/tmp/tmp.9fxIRywcfA logger.go:42: 15:11:12 | users/4-check-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:11:12 | users/4-check-cluster | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:11:12 | users/4-check-cluster | +++ GIT_BRANCH=PR-424 logger.go:42: 15:11:12 | users/4-check-cluster | +++ export VERSION=PR-424-70568ae logger.go:42: 15:11:12 | users/4-check-cluster | +++ VERSION=PR-424-70568ae logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:11:12 
| users/4-check-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:11:12 | users/4-check-cluster | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:11:12 | users/4-check-cluster | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:11:12 | users/4-check-cluster | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:11:12 | users/4-check-cluster | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:11:12 | users/4-check-cluster | ++++ which gdate logger.go:42: 15:11:12 | users/4-check-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:11:12 | users/4-check-cluster | ++++ which date logger.go:42: 15:11:12 | users/4-check-cluster | +++ date=/usr/bin/date logger.go:42: 15:11:12 | users/4-check-cluster | +++ command -v oc logger.go:42: 15:11:12 | users/4-check-cluster | +++ oc get projects logger.go:42: 15:11:18 | users/4-check-cluster | error: the server doesn't have a resource type "projects" logger.go:42: 15:11:18 | users/4-check-cluster | +++ kubectl get nodes logger.go:42: 15:11:18 | users/4-check-cluster | +++ grep '^minikube' logger.go:42: 15:11:19 | users/4-check-cluster | ++++ pwd logger.go:42: 15:11:19 | users/4-check-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/users logger.go:42: 15:11:19 | users/4-check-cluster | ++ test_name=users logger.go:42: 15:11:19 | users/4-check-cluster | + sleep 30 logger.go:42: 15:11:49 | users/4-check-cluster | + wait_cluster_consistency_async users 3 3 logger.go:42: 15:11:49 | users/4-check-cluster | + local cluster_name=users logger.go:42: 15:11:49 | users/4-check-cluster | + local cluster_size=3 logger.go:42: 15:11:49 | users/4-check-cluster | + local orc_size=3 logger.go:42: 15:11:49 | users/4-check-cluster | + '[' -z 3 ']' logger.go:42: 15:11:49 | users/4-check-cluster | + sleep 7 logger.go:42: 15:11:56 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:11:56 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:11:56 | users/4-check-cluster | ++ kubectl get ps 
users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:11:57 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:11:57 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:11:58 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:11:58 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:11:58 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:11:58 | users/4-check-cluster | + sleep 15 logger.go:42: 15:12:13 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:12:14 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:12:14 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:12:14 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:12:14 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:12:15 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:12:15 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:12:15 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:12:15 | users/4-check-cluster | + sleep 15 logger.go:42: 15:12:30 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:12:31 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:12:31 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:12:32 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:12:32 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:12:32 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:12:32 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:12:32 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:12:32 | users/4-check-cluster | + sleep 15 logger.go:42: 15:12:47 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:12:48 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:12:48 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:12:49 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:12:49 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:12:50 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:12:50 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:12:50 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:12:50 | users/4-check-cluster | + sleep 15 logger.go:42: 15:13:05 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:13:05 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:13:05 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' 
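wait_cluster_consistency_async itself is not echoed by xtrace, but the polling above shows what it does: re-check the ps resource every 15 seconds until MySQL reports state "ready" with the expected number of ready pods and the orchestrator reports the expected number of ready instances. A sketch reconstructed from this trace (the real helper in e2e-tests/functions may differ in details):

# Reconstructed from the polling visible in this step, not copied from the repo;
# the trace checks the three fields one at a time, this sketch checks them together.
wait_cluster_consistency_async() {
    local cluster_name="$1" cluster_size="$2" orc_size="$3"
    sleep 7
    while true; do
        state=$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.mysql.state}')
        mysql_ready=$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.mysql.ready}')
        orc_ready=$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.orchestrator.ready}')
        if [[ ${state} == "ready" && ${mysql_ready} == "${cluster_size}" && ${orc_ready} == "${orc_size}" ]]; then
            break
        fi
        echo 'waiting for cluster readyness (async)'
        sleep 15
    done
}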
logger.go:42: 15:13:06 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:13:06 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:13:07 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:13:07 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:13:07 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:13:07 | users/4-check-cluster | + sleep 15 logger.go:42: 15:13:22 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:13:23 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:13:23 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:13:23 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:13:23 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:13:24 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:13:24 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:13:24 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:13:24 | users/4-check-cluster | + sleep 15 logger.go:42: 15:13:39 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:13:40 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:13:40 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:13:40 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:13:40 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:13:41 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:13:41 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:13:41 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:13:41 | users/4-check-cluster | + sleep 15 logger.go:42: 15:13:56 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:13:57 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:13:57 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:13:58 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:13:58 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:13:59 | users/4-check-cluster | + [[ '' == \3 ]] logger.go:42: 15:13:59 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:13:59 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:13:59 | users/4-check-cluster | + sleep 15 logger.go:42: 15:14:14 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:14:14 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:14:14 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:14:15 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 
15:14:15 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:14:16 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 15:14:16 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:14:16 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:14:16 | users/4-check-cluster | + sleep 15 logger.go:42: 15:14:31 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:14:32 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:14:32 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:14:32 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:14:32 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:14:33 | users/4-check-cluster | + [[ 2 == \3 ]] logger.go:42: 15:14:33 | users/4-check-cluster | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:14:33 | users/4-check-cluster | waiting for cluster readyness (async) logger.go:42: 15:14:33 | users/4-check-cluster | + sleep 15 logger.go:42: 15:14:48 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:14:49 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:14:49 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:14:49 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:14:49 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:14:50 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:14:50 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.state}' logger.go:42: 15:14:51 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:14:51 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.state}' logger.go:42: 15:14:52 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:14:52 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:14:52 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:14:52 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:14:52 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:14:52 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:14:52 | users/4-check-cluster | + mysql_args='-h users-haproxy -uroot -proot_password_updated' logger.go:42: 15:14:52 | users/4-check-cluster | + users=($(get_mysql_users "${mysql_args}")) logger.go:42: 15:14:52 | users/4-check-cluster | ++ get_mysql_users '-h users-haproxy -uroot -proot_password_updated' logger.go:42: 15:14:52 | users/4-check-cluster | ++ local 'args=-h users-haproxy -uroot -proot_password_updated' logger.go:42: 15:14:52 | users/4-check-cluster | ++ run_mysql 'SELECT user FROM mysql.user' '-h users-haproxy -uroot -proot_password_updated' logger.go:42: 15:14:52 | users/4-check-cluster | ++ grep -vE 'mysql|root' logger.go:42: 15:14:52 | users/4-check-cluster | ++ local 'command=SELECT user FROM mysql.user' logger.go:42: 15:14:52 
| users/4-check-cluster | ++ local 'uri=-h users-haproxy -uroot -proot_password_updated' logger.go:42: 15:14:52 | users/4-check-cluster | ++ local pod= logger.go:42: 15:14:52 | users/4-check-cluster | +++ get_client_pod logger.go:42: 15:14:52 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:14:53 | users/4-check-cluster | ++ client_pod=mysql-client logger.go:42: 15:14:53 | users/4-check-cluster | ++ wait_pod mysql-client logger.go:42: 15:14:53 | users/4-check-cluster | ++ local pod=mysql-client logger.go:42: 15:14:53 | users/4-check-cluster | ++ set +o xtrace logger.go:42: 15:14:54 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:14:54 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT user FROM mysql.user" | mysql -sN -h users-haproxy -uroot -proot_password_updated' logger.go:42: 15:14:54 | users/4-check-cluster | ++ sed -e 's/mysql: //' logger.go:42: 15:14:54 | users/4-check-cluster | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:14:55 | users/4-check-cluster | + args= logger.go:42: 15:14:55 | users/4-check-cluster | + set +o errexit logger.go:42: 15:14:55 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 15:14:55 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:14:55 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:14:56 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:14:56 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:14:56 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:14:56 | users/4-check-cluster | + mysql_args='-h users-haproxy -umonitor -pmonitor_password_updated' logger.go:42: 15:14:56 | users/4-check-cluster | + pod=mysql-client logger.go:42: 15:14:56 | users/4-check-cluster | + case $user in logger.go:42: 15:14:56 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -umonitor -pmonitor_password_updated' mysql-client logger.go:42: 15:14:56 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 15:14:56 | users/4-check-cluster | + local 'uri=-h users-haproxy -umonitor -pmonitor_password_updated' logger.go:42: 15:14:56 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:14:56 | users/4-check-cluster | ++ get_client_pod logger.go:42: 15:14:56 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:14:57 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 15:14:57 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 15:14:57 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:14:57 | users/4-check-cluster | + set +o xtrace logger.go:42: 15:14:57 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:14:57 | users/4-check-cluster | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -umonitor -pmonitor_password_updated' logger.go:42: 15:14:57 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 15:14:57 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
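
[editor's note] The run_mysql and get_client_pod helpers traced here boil down to the sketch below. It is an approximation of what e2e-tests/functions does (the wait_pod readiness check is omitted); the exact definitions may differ.

    get_client_pod() {
        kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    }

    run_mysql() {
        # Pipe a single statement into the mysql CLI inside a pod: the shared
        # mysql-client pod by default, or an explicit pod such as users-mysql-0.
        local command=$1
        local uri=$2
        local pod=${3:-$(get_client_pod)}

        kubectl -n "${NAMESPACE}" exec "${pod}" -- bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }
    # e.g. run_mysql 'SELECT 1' '-h users-haproxy -umonitor -pmonitor_password_updated' mysql-client
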
logger.go:42: 15:14:59 | users/4-check-cluster | 1 logger.go:42: 15:14:59 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 15:14:59 | users/4-check-cluster | ++ echo success logger.go:42: 15:14:59 | users/4-check-cluster | + args=' --from-literal=monitor=success' logger.go:42: 15:14:59 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 15:14:59 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:14:59 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:00 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:15:00 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:15:00 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:15:00 | users/4-check-cluster | + mysql_args='-h users-haproxy -uoperator -poperator_password_updated' logger.go:42: 15:15:00 | users/4-check-cluster | + pod=mysql-client logger.go:42: 15:15:00 | users/4-check-cluster | + case $user in logger.go:42: 15:15:00 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -uoperator -poperator_password_updated' mysql-client logger.go:42: 15:15:00 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 15:15:00 | users/4-check-cluster | + local 'uri=-h users-haproxy -uoperator -poperator_password_updated' logger.go:42: 15:15:00 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:00 | users/4-check-cluster | ++ get_client_pod logger.go:42: 15:15:00 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:15:00 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 15:15:00 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 15:15:00 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:00 | users/4-check-cluster | + set +o xtrace logger.go:42: 15:15:01 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:15:01 | users/4-check-cluster | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -uoperator -poperator_password_updated' logger.go:42: 15:15:01 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 15:15:01 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:15:03 | users/4-check-cluster | 1 logger.go:42: 15:15:03 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 15:15:03 | users/4-check-cluster | ++ echo success logger.go:42: 15:15:03 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success' logger.go:42: 15:15:03 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 15:15:03 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:15:03 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:03 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:15:03 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:15:03 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:15:03 | users/4-check-cluster | + mysql_args='-h users-haproxy -uorchestrator -porchestrator_password_updated' logger.go:42: 15:15:03 | users/4-check-cluster | + pod=mysql-client logger.go:42: 15:15:03 | users/4-check-cluster | + case $user in logger.go:42: 15:15:03 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -uorchestrator -porchestrator_password_updated' mysql-client logger.go:42: 15:15:03 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 15:15:03 | users/4-check-cluster | + local 'uri=-h users-haproxy -uorchestrator -porchestrator_password_updated' logger.go:42: 15:15:03 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:03 | users/4-check-cluster | ++ get_client_pod logger.go:42: 15:15:03 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:15:04 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 15:15:04 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 15:15:04 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:04 | users/4-check-cluster | + set +o xtrace logger.go:42: 15:15:05 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:15:05 | users/4-check-cluster | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -uorchestrator -porchestrator_password_updated' logger.go:42: 15:15:05 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 15:15:05 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:15:06 | users/4-check-cluster | 1 logger.go:42: 15:15:06 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 15:15:06 | users/4-check-cluster | ++ echo success logger.go:42: 15:15:06 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success' logger.go:42: 15:15:06 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 15:15:06 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:15:06 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:07 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:15:07 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:15:07 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:15:07 | users/4-check-cluster | + mysql_args='-h users-haproxy -ureplication -preplication_password_updated' logger.go:42: 15:15:07 | users/4-check-cluster | + pod=mysql-client logger.go:42: 15:15:07 | users/4-check-cluster | + case $user in logger.go:42: 15:15:07 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h users-haproxy -ureplication -preplication_password_updated' mysql-client logger.go:42: 15:15:07 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 15:15:07 | users/4-check-cluster | + local 'uri=-h users-haproxy -ureplication -preplication_password_updated' logger.go:42: 15:15:07 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:07 | users/4-check-cluster | ++ get_client_pod logger.go:42: 15:15:07 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:15:08 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 15:15:08 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 15:15:08 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:08 | users/4-check-cluster | + set +o xtrace logger.go:42: 15:15:08 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:15:08 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 15:15:08 | users/4-check-cluster | + kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h users-haproxy -ureplication -preplication_password_updated' logger.go:42: 15:15:08 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:15:10 | users/4-check-cluster | 1 logger.go:42: 15:15:10 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 15:15:10 | users/4-check-cluster | ++ echo success logger.go:42: 15:15:10 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success' logger.go:42: 15:15:10 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 15:15:10 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:15:10 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:11 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:15:11 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:15:11 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:15:11 | users/4-check-cluster | + mysql_args='-h users-haproxy -uheartbeat -pheartbeat_password_updated' logger.go:42: 15:15:11 | users/4-check-cluster | + pod=mysql-client logger.go:42: 15:15:11 | users/4-check-cluster | + case $user in logger.go:42: 15:15:11 | users/4-check-cluster | + mysql_args='-h localhost -uheartbeat -pheartbeat_password_updated' logger.go:42: 15:15:11 | users/4-check-cluster | ++ get_cluster_name logger.go:42: 15:15:11 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:11 | users/4-check-cluster | + pod=users-mysql-0 logger.go:42: 15:15:11 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h localhost -uheartbeat -pheartbeat_password_updated' users-mysql-0 logger.go:42: 15:15:11 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 15:15:11 | users/4-check-cluster | + local 'uri=-h localhost -uheartbeat -pheartbeat_password_updated' logger.go:42: 15:15:11 | users/4-check-cluster | + local pod=users-mysql-0 logger.go:42: 15:15:11 | users/4-check-cluster | ++ get_client_pod logger.go:42: 15:15:11 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:15:12 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 15:15:12 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 15:15:12 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:12 | users/4-check-cluster | + set +o xtrace logger.go:42: 15:15:13 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:15:13 | users/4-check-cluster | + kubectl -n kuttl-test-worthy-amoeba exec users-mysql-0 -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h localhost -uheartbeat -pheartbeat_password_updated' logger.go:42: 15:15:13 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 15:15:13 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:15:14 | users/4-check-cluster | Defaulted container "mysql" out of: mysql, xtrabackup, pt-heartbeat, mysql-init (init) logger.go:42: 15:15:14 | users/4-check-cluster | 1 logger.go:42: 15:15:14 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 15:15:14 | users/4-check-cluster | ++ echo success logger.go:42: 15:15:14 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success' logger.go:42: 15:15:14 | users/4-check-cluster | + for user in '"${users[@]}"' logger.go:42: 15:15:14 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:15:14 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:15 | users/4-check-cluster | ++ get_haproxy_svc users logger.go:42: 15:15:15 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:15:15 | users/4-check-cluster | ++ echo users-haproxy logger.go:42: 15:15:15 | users/4-check-cluster | + mysql_args='-h users-haproxy -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 15:15:15 | users/4-check-cluster | + pod=mysql-client logger.go:42: 15:15:15 | users/4-check-cluster | + case $user in logger.go:42: 15:15:15 | users/4-check-cluster | + mysql_args='-h localhost -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 15:15:15 | users/4-check-cluster | ++ get_cluster_name logger.go:42: 15:15:15 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:16 | users/4-check-cluster | + pod=users-mysql-0 logger.go:42: 15:15:16 | users/4-check-cluster | + run_mysql 'SELECT 1' '-h localhost -uxtrabackup -pxtrabackup_password_updated' users-mysql-0 logger.go:42: 15:15:16 | users/4-check-cluster | + local 'command=SELECT 1' logger.go:42: 15:15:16 | users/4-check-cluster | + local 'uri=-h localhost -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 15:15:16 | users/4-check-cluster | + local pod=users-mysql-0 logger.go:42: 15:15:16 | users/4-check-cluster | ++ get_client_pod logger.go:42: 15:15:16 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:15:16 | users/4-check-cluster | + client_pod=mysql-client logger.go:42: 15:15:16 | users/4-check-cluster | + wait_pod mysql-client logger.go:42: 15:15:16 | users/4-check-cluster | + local pod=mysql-client logger.go:42: 15:15:16 | users/4-check-cluster | + set +o xtrace logger.go:42: 15:15:17 | users/4-check-cluster | mysql-clienttrue logger.go:42: 15:15:17 | users/4-check-cluster | + sed -e 's/mysql: //' logger.go:42: 15:15:17 | users/4-check-cluster | + grep -v 'Using a password on the command line interface can be insecure.' 
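
[editor's note] The monitor, operator, orchestrator, replication and heartbeat checks above, and the xtrabackup check that follows, all come from one loop in this test step. A condensed sketch is below; helper names come from the trace, everything else (function name, exact success handling) is assumed. Passwords were rotated to the *_password_updated values in an earlier step of this test.

    check_user_connections() {
        local haproxy_svc
        haproxy_svc=$(get_haproxy_svc "$(get_cluster_name)")

        local args=''
        for user in $(get_mysql_users "-h ${haproxy_svc} -uroot -proot_password_updated"); do
            local mysql_args="-h ${haproxy_svc} -u${user} -p${user}_password_updated"
            local pod=mysql-client
            case ${user} in
                heartbeat | xtrabackup)
                    # local-only accounts: connect inside the first mysql pod instead of via HAProxy
                    mysql_args="-h localhost -u${user} -p${user}_password_updated"
                    pod="$(get_cluster_name)-mysql-0"
                    ;;
            esac
            if run_mysql 'SELECT 1' "${mysql_args}" "${pod}" >/dev/null; then
                args="${args} --from-literal=${user}=success"
            fi
        done

        # args is intentionally left unquoted so it splits into separate --from-literal flags
        kubectl create configmap -n "${NAMESPACE}" 04-check-connections ${args}
    }
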
logger.go:42: 15:15:17 | users/4-check-cluster | + kubectl -n kuttl-test-worthy-amoeba exec users-mysql-0 -- bash -c 'printf '\''%s\n'\'' "SELECT 1" | mysql -sN -h localhost -uxtrabackup -pxtrabackup_password_updated' logger.go:42: 15:15:19 | users/4-check-cluster | Defaulted container "mysql" out of: mysql, xtrabackup, pt-heartbeat, mysql-init (init) logger.go:42: 15:15:19 | users/4-check-cluster | 1 logger.go:42: 15:15:19 | users/4-check-cluster | ++ '[' 0 -eq 0 ']' logger.go:42: 15:15:19 | users/4-check-cluster | ++ echo success logger.go:42: 15:15:19 | users/4-check-cluster | + args=' --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=xtrabackup=success' logger.go:42: 15:15:19 | users/4-check-cluster | + set -o errexit logger.go:42: 15:15:19 | users/4-check-cluster | + kubectl create configmap -n kuttl-test-worthy-amoeba 04-check-connections --from-literal=monitor=success --from-literal=operator=success --from-literal=orchestrator=success --from-literal=replication=success --from-literal=heartbeat=success --from-literal=xtrabackup=success logger.go:42: 15:15:19 | users/4-check-cluster | configmap/04-check-connections created logger.go:42: 15:15:19 | users/4-check-cluster | + kubectl get configmap -n kuttl-test-worthy-amoeba 04-check-connections -o yaml logger.go:42: 15:15:20 | users/4-check-cluster | apiVersion: v1 logger.go:42: 15:15:20 | users/4-check-cluster | data: logger.go:42: 15:15:20 | users/4-check-cluster | heartbeat: success logger.go:42: 15:15:20 | users/4-check-cluster | monitor: success logger.go:42: 15:15:20 | users/4-check-cluster | operator: success logger.go:42: 15:15:20 | users/4-check-cluster | orchestrator: success logger.go:42: 15:15:20 | users/4-check-cluster | replication: success logger.go:42: 15:15:20 | users/4-check-cluster | xtrabackup: success logger.go:42: 15:15:20 | users/4-check-cluster | kind: ConfigMap logger.go:42: 15:15:20 | users/4-check-cluster | metadata: logger.go:42: 15:15:20 | users/4-check-cluster | creationTimestamp: "2023-08-17T15:15:19Z" logger.go:42: 15:15:20 | users/4-check-cluster | name: 04-check-connections logger.go:42: 15:15:20 | users/4-check-cluster | namespace: kuttl-test-worthy-amoeba logger.go:42: 15:15:20 | users/4-check-cluster | resourceVersion: "40908" logger.go:42: 15:15:20 | users/4-check-cluster | uid: 6869db78-973c-490d-a63e-25f12bb67f21 logger.go:42: 15:15:20 | users/4-check-cluster | + wait_cluster_consistency_async users 3 3 logger.go:42: 15:15:20 | users/4-check-cluster | + local cluster_name=users logger.go:42: 15:15:20 | users/4-check-cluster | + local cluster_size=3 logger.go:42: 15:15:20 | users/4-check-cluster | + local orc_size=3 logger.go:42: 15:15:20 | users/4-check-cluster | + '[' -z 3 ']' logger.go:42: 15:15:20 | users/4-check-cluster | + sleep 7 logger.go:42: 15:15:27 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:15:28 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:15:28 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:15:29 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 15:15:29 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 15:15:29 | users/4-check-cluster | + [[ 3 == \3 ]] logger.go:42: 
15:15:29 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.orchestrator.state}' logger.go:42: 15:15:30 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:15:30 | users/4-check-cluster | ++ kubectl get ps users -n kuttl-test-worthy-amoeba -o 'jsonpath={.status.state}' logger.go:42: 15:15:31 | users/4-check-cluster | + [[ ready == \r\e\a\d\y ]] logger.go:42: 15:15:31 | users/4-check-cluster | +++ get_cluster_name logger.go:42: 15:15:31 | users/4-check-cluster | +++ kubectl -n kuttl-test-worthy-amoeba get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:15:32 | users/4-check-cluster | ++ get_orc_headless_fqdn users 0 logger.go:42: 15:15:32 | users/4-check-cluster | ++ local cluster=users logger.go:42: 15:15:32 | users/4-check-cluster | ++ local index=0 logger.go:42: 15:15:32 | users/4-check-cluster | ++ echo users-orc-0.users-orc logger.go:42: 15:15:32 | users/4-check-cluster | + orc_host=users-orc-0.users-orc logger.go:42: 15:15:32 | users/4-check-cluster | ++ run_curl http://users-orc-0.users-orc:3000/api/clusters/ logger.go:42: 15:15:32 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'curl -s -k http://users-orc-0.users-orc:3000/api/clusters/' logger.go:42: 15:15:32 | users/4-check-cluster | ++ jq -r '.[0]' logger.go:42: 15:15:33 | users/4-check-cluster | + cluster=users-mysql-0.users-mysql.kuttl-test-worthy-amoeba:3306 logger.go:42: 15:15:33 | users/4-check-cluster | ++ run_curl http://users-orc-0.users-orc:3000/api/cluster/users-mysql-0.users-mysql.kuttl-test-worthy-amoeba:3306/ logger.go:42: 15:15:33 | users/4-check-cluster | ++ tee logger.go:42: 15:15:33 | users/4-check-cluster | ++ kubectl -n kuttl-test-worthy-amoeba exec mysql-client -- bash -c 'curl -s -k http://users-orc-0.users-orc:3000/api/cluster/users-mysql-0.users-mysql.kuttl-test-worthy-amoeba:3306/' logger.go:42: 15:15:33 | users/4-check-cluster | ++ grep true logger.go:42: 15:15:33 | users/4-check-cluster | ++ jq -r '.[] | "\(.ReplicationSQLThreadRuning) \(.ReplicationIOThreadRuning)"' logger.go:42: 15:15:33 | users/4-check-cluster | ++ wc -l logger.go:42: 15:15:33 | users/4-check-cluster | ++ sed 's/ *//' logger.go:42: 15:15:35 | users/4-check-cluster | + replicating=2 logger.go:42: 15:15:35 | users/4-check-cluster | + kubectl create configmap -n kuttl-test-worthy-amoeba 04-check-replication --from-literal=replicating=2 logger.go:42: 15:15:35 | users/4-check-cluster | configmap/04-check-replication created logger.go:42: 15:15:38 | users/4-check-cluster | test step completed 4-check-cluster logger.go:42: 15:15:38 | users/5-drop-finalizer | starting test step 5-drop-finalizer logger.go:42: 15:15:39 | users/5-drop-finalizer | PerconaServerMySQL:kuttl-test-worthy-amoeba/users updated logger.go:42: 15:15:39 | users/5-drop-finalizer | test step completed 5-drop-finalizer logger.go:42: 15:15:39 | users | users events from ns kuttl-test-worthy-amoeba: logger.go:42: 15:15:39 | users | 2023-08-17 15:07:03 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-mnjlt Scheduled Successfully assigned kuttl-test-worthy-amoeba/percona-server-mysql-operator-6b56d66f99-mnjlt to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-brgf logger.go:42: 15:15:39 | users | 2023-08-17 15:07:03 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-6b56d66f99 SuccessfulCreate Created pod: percona-server-mysql-operator-6b56d66f99-mnjlt logger.go:42: 15:15:39 | users | 2023-08-17 15:07:03 +0000 UTC Normal 
Deployment.apps percona-server-mysql-operator ScalingReplicaSet Scaled up replica set percona-server-mysql-operator-6b56d66f99 to 1 logger.go:42: 15:15:39 | users | 2023-08-17 15:07:05 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-mnjlt.spec.containers{manager} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:06 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-6b56d66f99-mnjlt_dc65b311-42a8-410d-83ea-1d4492ebd36c became leader logger.go:42: 15:15:39 | users | 2023-08-17 15:07:06 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-mnjlt.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 447.341165ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:06 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-mnjlt.spec.containers{manager} Created Created container manager logger.go:42: 15:15:39 | users | 2023-08-17 15:07:06 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-mnjlt.spec.containers{manager} Started Started container manager logger.go:42: 15:15:39 | users | 2023-08-17 15:07:07 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-worthy-amoeba/mysql-client to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-4tf7 logger.go:42: 15:15:39 | users | 2023-08-17 15:07:07 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.25" already present on machine logger.go:42: 15:15:39 | users | 2023-08-17 15:07:07 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client logger.go:42: 15:15:39 | users | 2023-08-17 15:07:08 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client logger.go:42: 15:15:39 | users | 2023-08-17 15:07:27 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding logger.go:42: 15:15:39 | users | 2023-08-17 15:07:27 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator logger.go:42: 15:15:39 | users | 2023-08-17 15:07:27 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-worthy-amoeba/datadir-users-mysql-0" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:27 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-0 Pod users-mysql-0 in StatefulSet users-mysql success logger.go:42: 15:15:39 | users | 2023-08-17 15:07:27 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-0 in StatefulSet users-mysql successful logger.go:42: 15:15:39 | users | 2023-08-17 15:07:30 +0000 UTC Normal Pod users-orc-0 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-orc-0 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-brgf logger.go:42: 15:15:39 | users | 2023-08-17 15:07:30 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-0 in StatefulSet users-orc successful logger.go:42: 15:15:39 | users | 2023-08-17 15:07:31 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-0 ProvisioningSucceeded Successfully provisioned volume 
pvc-594e0ffa-1d3b-4d1a-bcae-6f0696863989 logger.go:42: 15:15:39 | users | 2023-08-17 15:07:31 +0000 UTC Normal Pod users-mysql-0 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-mysql-0 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-qrsp logger.go:42: 15:15:39 | users | 2023-08-17 15:07:31 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:31 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 349.736524ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:31 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Created Created container orc-init logger.go:42: 15:15:39 | users | 2023-08-17 15:07:31 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Started Started container orc-init logger.go:42: 15:15:39 | users | 2023-08-17 15:07:33 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 396.698435ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Created Created container orc logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Started Started container orc logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 401.172656ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:07:34 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:07:38 +0000 UTC Normal Pod users-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-594e0ffa-1d3b-4d1a-bcae-6f0696863989" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:39 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:40 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 406.000816ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:40 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init logger.go:42: 15:15:39 | users | 2023-08-17 15:07:40 +0000 UTC Normal Pod users-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init logger.go:42: 15:15:39 | users | 2023-08-17 15:07:41 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" logger.go:42: 15:15:39 | users | 2023-08-17 
15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 376.335436ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Created Created container mysql logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{mysql} Started Started container mysql logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 382.70411ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup logger.go:42: 15:15:39 | users | 2023-08-17 15:07:42 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" logger.go:42: 15:15:39 | users | 2023-08-17 15:07:43 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 358.220078ms logger.go:42: 15:15:39 | users | 2023-08-17 15:07:43 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat logger.go:42: 15:15:39 | users | 2023-08-17 15:07:43 +0000 UTC Normal Pod users-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat logger.go:42: 15:15:39 | users | 2023-08-17 15:08:05 +0000 UTC Normal Pod users-orc-1 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-orc-1 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-4tf7 logger.go:42: 15:15:39 | users | 2023-08-17 15:08:05 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-1 in StatefulSet users-orc successful logger.go:42: 15:15:39 | users | 2023-08-17 15:08:06 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:07 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 401.946001ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:07 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Created Created container orc-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:07 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Started Started container orc-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:08 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:08 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 385.805644ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:08 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Created Created container 
orc logger.go:42: 15:15:39 | users | 2023-08-17 15:08:08 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Started Started container orc logger.go:42: 15:15:39 | users | 2023-08-17 15:08:08 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:09 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 358.48499ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:09 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:08:09 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:08:14 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding logger.go:42: 15:15:39 | users | 2023-08-17 15:08:14 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator logger.go:42: 15:15:39 | users | 2023-08-17 15:08:14 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-worthy-amoeba/datadir-users-mysql-1" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:14 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-1 Pod users-mysql-1 in StatefulSet users-mysql success logger.go:42: 15:15:39 | users | 2023-08-17 15:08:14 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-1 in StatefulSet users-mysql successful logger.go:42: 15:15:39 | users | 2023-08-17 15:08:18 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-751407c1-543f-4b1f-acb1-93550d50929c logger.go:42: 15:15:39 | users | 2023-08-17 15:08:19 +0000 UTC Normal Pod users-mysql-1 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-mysql-1 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-brgf logger.go:42: 15:15:39 | users | 2023-08-17 15:08:20 +0000 UTC Normal Pod users-haproxy-0 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-haproxy-0 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-qrsp logger.go:42: 15:15:39 | users | 2023-08-17 15:08:20 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:20 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-0 in StatefulSet users-haproxy successful logger.go:42: 15:15:39 | users | 2023-08-17 15:08:21 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 432.039421ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:21 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:21 +0000 UTC Normal Pod users-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init logger.go:42: 15:15:39 | 
users | 2023-08-17 15:08:22 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 395.655184ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Created Created container haproxy logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{haproxy} Started Started container haproxy logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 356.407884ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal Pod users-haproxy-1 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-haproxy-1 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-4tf7 logger.go:42: 15:15:39 | users | 2023-08-17 15:08:23 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-1 in StatefulSet users-haproxy successful logger.go:42: 15:15:39 | users | 2023-08-17 15:08:24 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:25 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 377.683721ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:25 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:25 +0000 UTC Normal Pod users-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:26 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:26 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 382.119639ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:26 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Created Created container haproxy logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 UTC Normal Pod users-haproxy-1.spec.containers{haproxy} Started Started container haproxy logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 
UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 388.657952ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 UTC Normal Pod users-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 UTC Normal Pod users-haproxy-2 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-haproxy-2 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-brgf logger.go:42: 15:15:39 | users | 2023-08-17 15:08:27 +0000 UTC Normal StatefulSet.apps users-haproxy SuccessfulCreate create Pod users-haproxy-2 in StatefulSet users-haproxy successful logger.go:42: 15:15:39 | users | 2023-08-17 15:08:28 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:28 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 408.031643ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:28 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:28 +0000 UTC Normal Pod users-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-751407c1-543f-4b1f-acb1-93550d50929c" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:29 +0000 UTC Normal Pod users-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:29 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:30 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:30 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 814.83172ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:30 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:30 +0000 UTC Normal Pod users-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init logger.go:42: 15:15:39 | users | 2023-08-17 15:08:31 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 806.056849ms logger.go:42: 15:15:39 | users | 2023-08-17 15:08:31 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Created Created container haproxy logger.go:42: 15:15:39 | users | 2023-08-17 15:08:31 +0000 UTC Normal Pod users-haproxy-2.spec.containers{haproxy} Started Started container haproxy logger.go:42: 15:15:39 | users | 2023-08-17 15:08:31 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:15:39 | users | 2023-08-17 15:08:31 +0000 UTC 
Normal Pod users-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 332.432724ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:31 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 623.73563ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Created Created container mysql
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Started Started container mysql
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup"
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 426.676145ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:32 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:33 +0000 UTC Normal Pod users-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:33 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:33 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 377.66881ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:33 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:33 +0000 UTC Normal Pod users-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:41 +0000 UTC Normal Pod users-orc-2 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-orc-2 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-qrsp
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:41 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulCreate create Pod users-orc-2 in StatefulSet users-orc successful
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:42 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:42 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 352.203258ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:42 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:42 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:43 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 397.612071ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Created Created container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Started Started container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 373.465962ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:44 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:50 +0000 UTC Warning Pod users-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed:
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:50 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted
logger.go:42: 15:15:39 | users | 2023-08-17 15:08:53 +0000 UTC Normal Pod users-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 387.610554ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:24 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:24 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:24 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-worthy-amoeba/datadir-users-mysql-2"
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:24 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Claim datadir-users-mysql-2 Pod users-mysql-2 in StatefulSet users-mysql success
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:24 +0000 UTC Normal StatefulSet.apps users-mysql SuccessfulCreate create Pod users-mysql-2 in StatefulSet users-mysql successful
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:28 +0000 UTC Normal PersistentVolumeClaim datadir-users-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-2b50963a-8fc2-41a4-ad31-7c3dec373a22
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:28 +0000 UTC Normal Pod users-mysql-2 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-mysql-2 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-4tf7
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:35 +0000 UTC Normal Pod users-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2b50963a-8fc2-41a4-ad31-7c3dec373a22"
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:37 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:37 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 500.271775ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:37 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:37 +0000 UTC Normal Pod users-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:39 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 363.658827ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Created Created container mysql
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Started Started container mysql
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup"
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 371.53592ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:40 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:41 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 384.591614ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:41 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:41 +0000 UTC Normal Pod users-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:57 +0000 UTC Warning Pod users-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed:
logger.go:42: 15:15:39 | users | 2023-08-17 15:09:57 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted
logger.go:42: 15:15:39 | users | 2023-08-17 15:10:01 +0000 UTC Normal Pod users-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 420.952251ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:11 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Killing Stopping container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:11 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:11 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-2 in StatefulSet users-orc successful
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:42 +0000 UTC Normal Pod users-orc-2 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-orc-2 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-qrsp
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:43 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:43 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 419.640308ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:43 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:44 +0000 UTC Normal Pod users-orc-2.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:45 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:45 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 347.910266ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:45 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Created Created container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:45 +0000 UTC Normal Pod users-orc-2.spec.containers{orc} Started Started container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:45 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:46 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 372.511877ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:46 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:11:46 +0000 UTC Normal Pod users-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:18 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Killing Stopping container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:18 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:18 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-1 in StatefulSet users-orc successful
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:49 +0000 UTC Normal Pod users-orc-1 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-orc-1 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-4tf7
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:50 +0000 UTC Warning Pod users-orc-1 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:52 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:52 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 377.63359ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:52 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:52 +0000 UTC Normal Pod users-orc-1.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 385.319924ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Created Created container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:54 +0000 UTC Normal Pod users-orc-1.spec.containers{orc} Started Started container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:54 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:55 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 345.778226ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:55 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:12:55 +0000 UTC Normal Pod users-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:26 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Killing Stopping container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:26 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:26 +0000 UTC Normal StatefulSet.apps users-orc SuccessfulDelete delete Pod users-orc-0 in StatefulSet users-orc successful
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:58 +0000 UTC Normal Pod users-orc-0 Scheduled Successfully assigned kuttl-test-worthy-amoeba/users-orc-0 to gke-jen-ps-424-70568ae-7-default-pool-95e5ef23-brgf
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:58 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:59 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 442.709787ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:59 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:13:59 +0000 UTC Normal Pod users-orc-0.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:00 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 393.011493ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Created Created container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{orc} Started Started container orc
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 416.091649ms
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:15:39 | users | 2023-08-17 15:14:01 +0000 UTC Normal Pod users-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:15:39 | users | Deleting namespace: kuttl-test-worthy-amoeba
=== CONT kuttl
harness.go:405: run tests finished
harness.go:513: cleaning up
harness.go:570: removing temp folder: ""
--- PASS: kuttl (573.50s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/users (570.05s)
PASS