=== RUN kuttl harness.go:462: starting setup harness.go:252: running tests using configured kubeconfig. harness.go:275: Successful connection to cluster at: https://34.69.239.223 harness.go:360: running tests harness.go:73: going to run test suite with timeout of 180 seconds for each step harness.go:372: testsuite: e2e-tests/tests has 30 tests === RUN kuttl/harness === RUN kuttl/harness/scaling === PAUSE kuttl/harness/scaling === CONT kuttl/harness/scaling logger.go:42: 14:47:49 | scaling | Creating namespace: kuttl-test-useful-sculpin logger.go:42: 14:47:50 | scaling/0-deploy-operator | starting test step 0-deploy-operator logger.go:42: 14:47:50 | scaling/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_temp_dir # do this only in the first TestStep deploy_operator deploy_non_tls_cluster_secrets deploy_tls_cluster_secrets deploy_client] logger.go:42: 14:47:50 | scaling/0-deploy-operator | + source ../../functions logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ realpath ../../.. logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++++ pwd logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++ test_name=scaling logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ GIT_BRANCH=PR-523 logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export VERSION=PR-523-f00253e logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ VERSION=PR-523-f00253e logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export 
IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++++ which gdate logger.go:42: 14:47:50 | scaling/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:47:50 | scaling/0-deploy-operator | ++++ which date logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ date=/usr/bin/date logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ command -v oc logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ kubectl get nodes logger.go:42: 14:47:50 | scaling/0-deploy-operator | +++ grep '^minikube' logger.go:42: 14:47:50 | scaling/0-deploy-operator | + init_temp_dir logger.go:42: 14:47:50 | scaling/0-deploy-operator | + rm -rf /tmp/kuttl/ps/scaling logger.go:42: 14:47:50 | scaling/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/scaling logger.go:42: 14:47:50 | scaling/0-deploy-operator | + deploy_operator logger.go:42: 14:47:50 | scaling/0-deploy-operator | + kubectl -n kuttl-test-useful-sculpin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/crd.yaml logger.go:42: 14:47:51 | scaling/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied logger.go:42: 14:47:51 | scaling/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied logger.go:42: 
14:47:52 | scaling/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied logger.go:42: 14:47:52 | scaling/0-deploy-operator | + kubectl -n kuttl-test-useful-sculpin apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/rbac.yaml logger.go:42: 14:47:53 | scaling/0-deploy-operator | serviceaccount/percona-server-mysql-operator created logger.go:42: 14:47:53 | scaling/0-deploy-operator | serviceaccount/percona-server-mysql-operator-orchestrator created logger.go:42: 14:47:54 | scaling/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 14:47:54 | scaling/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator created logger.go:42: 14:47:54 | scaling/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created logger.go:42: 14:47:55 | scaling/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 14:47:55 | scaling/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created logger.go:42: 14:47:55 | scaling/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created logger.go:42: 14:47:55 | scaling/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 14:47:55 | scaling/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' logger.go:42: 14:47:55 | scaling/0-deploy-operator | + kubectl -n kuttl-test-useful-sculpin apply -f - logger.go:42: 14:47:55 | scaling/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:47:55 | scaling/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-523-f00253e"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/operator.yaml logger.go:42: 14:47:56 | scaling/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 14:47:57 | scaling/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 14:47:57 | scaling/0-deploy-operator | + deploy_non_tls_cluster_secrets logger.go:42: 14:47:57 | scaling/0-deploy-operator | + kubectl -n kuttl-test-useful-sculpin apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/secrets.yaml logger.go:42: 14:47:58 | scaling/0-deploy-operator | secret/test-secrets created logger.go:42: 14:47:58 | scaling/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 14:47:58 | scaling/0-deploy-operator | + kubectl -n kuttl-test-useful-sculpin apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/ssl-secret.yaml logger.go:42: 14:47:59 | scaling/0-deploy-operator | secret/test-ssl created logger.go:42: 14:47:59 | scaling/0-deploy-operator | + deploy_client logger.go:42: 14:47:59 | scaling/0-deploy-operator | + kubectl -n kuttl-test-useful-sculpin apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/client.yaml logger.go:42: 14:47:59 | scaling/0-deploy-operator | pod/mysql-client created logger.go:42: 14:48:08 | 
scaling/0-deploy-operator | test step completed 0-deploy-operator logger.go:42: 14:48:08 | scaling/1-create-cluster | starting test step 1-create-cluster logger.go:42: 14:48:08 | scaling/1-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 14:48:08 | scaling/1-create-cluster | + source ../../functions logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ realpath ../../.. logger.go:42: 14:48:08 | scaling/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:48:08 | scaling/1-create-cluster | ++++ pwd logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:48:08 | scaling/1-create-cluster | ++ test_name=scaling logger.go:42: 14:48:08 | scaling/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:48:08 | scaling/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ GIT_BRANCH=PR-523 logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export VERSION=PR-523-f00253e logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ VERSION=PR-523-f00253e logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup 
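
Note on the 1-create-cluster step traced above: the test pipes deploy/cr.yaml through a chain of yq edits before applying it. A minimal sketch of that pattern, with paths and values taken from the trace (the real get_cr helper in e2e-tests/functions also substitutes the operator, mysql, haproxy, orchestrator and other images, as visible later in this trace):

    # Assumes NAMESPACE is set and deploy/cr.yaml is the base custom resource.
    yq eval '.metadata.name="scaling"' deploy/cr.yaml \
      | yq eval '.spec.mysql.clusterType="async"' - \
      | yq eval '.spec.mysql.size=3' - \
      | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
      | yq eval '.spec.orchestrator.enabled=true' - \
      | yq eval '.spec.orchestrator.size=3' - \
      | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
      | kubectl -n "${NAMESPACE}" apply -f -
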
logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:48:08 | scaling/1-create-cluster | ++++ which gdate logger.go:42: 14:48:08 | scaling/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:48:08 | scaling/1-create-cluster | ++++ which date logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ date=/usr/bin/date logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ command -v oc logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ kubectl get nodes logger.go:42: 14:48:08 | scaling/1-create-cluster | +++ grep '^minikube' logger.go:42: 14:48:09 | scaling/1-create-cluster | + get_cr logger.go:42: 14:48:09 | scaling/1-create-cluster | + local name_suffix= logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval .spec.orchestrator.size=3 - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + kubectl -n kuttl-test-useful-sculpin apply -f - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.initImage="%s"' 
perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-523-f00253e"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.metadata.name="%s"' scaling logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.metadata.name="scaling"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/cr.yaml logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + '[' -n '' ']' logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval - logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:09 | scaling/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 14:48:09 | scaling/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 14:48:10 | scaling/1-create-cluster | perconaservermysql.ps.percona.com/scaling created logger.go:42: 14:51:34 | scaling/1-create-cluster | test step completed 1-create-cluster logger.go:42: 14:51:34 | scaling/2-write-data | starting test step 2-write-data logger.go:42: 14:51:34 | scaling/2-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" run_mysql \ "INSERT myDB.myTable (id) VALUES (100500)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"] logger.go:42: 14:51:34 | scaling/2-write-data | + source ../../functions logger.go:42: 14:51:34 | 
scaling/2-write-data | +++ realpath ../../.. logger.go:42: 14:51:34 | scaling/2-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:34 | scaling/2-write-data | ++++ pwd logger.go:42: 14:51:34 | scaling/2-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:51:34 | scaling/2-write-data | ++ test_name=scaling logger.go:42: 14:51:34 | scaling/2-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:51:34 | scaling/2-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:34 | scaling/2-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:34 | scaling/2-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:34 | scaling/2-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:34 | scaling/2-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:34 | scaling/2-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:34 | scaling/2-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:34 | scaling/2-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:34 | scaling/2-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:34 | scaling/2-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:34 | scaling/2-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:51:34 | scaling/2-write-data | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:51:34 | scaling/2-write-data | +++ GIT_BRANCH=PR-523 logger.go:42: 14:51:34 | scaling/2-write-data | +++ export VERSION=PR-523-f00253e logger.go:42: 14:51:34 | scaling/2-write-data | +++ VERSION=PR-523-f00253e logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:34 | scaling/2-write-data | +++ 
IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:34 | scaling/2-write-data | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:34 | scaling/2-write-data | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:34 | scaling/2-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:34 | scaling/2-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:34 | scaling/2-write-data | ++++ which gdate logger.go:42: 14:51:34 | scaling/2-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:51:34 | scaling/2-write-data | ++++ which date logger.go:42: 14:51:34 | scaling/2-write-data | +++ date=/usr/bin/date logger.go:42: 14:51:34 | scaling/2-write-data | +++ command -v oc logger.go:42: 14:51:34 | scaling/2-write-data | +++ kubectl get nodes logger.go:42: 14:51:34 | scaling/2-write-data | +++ grep '^minikube' logger.go:42: 14:51:35 | scaling/2-write-data | +++ get_cluster_name logger.go:42: 14:51:35 | scaling/2-write-data | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:51:35 | scaling/2-write-data | ++ get_haproxy_svc scaling logger.go:42: 14:51:35 | scaling/2-write-data | ++ local cluster=scaling logger.go:42: 14:51:35 | scaling/2-write-data | ++ echo scaling-haproxy logger.go:42: 14:51:35 | scaling/2-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:35 | scaling/2-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 14:51:35 | scaling/2-write-data | + local 'uri=-h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:35 | scaling/2-write-data | + local pod= logger.go:42: 14:51:35 | scaling/2-write-data | ++ get_client_pod logger.go:42: 14:51:35 | scaling/2-write-data | ++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:36 | scaling/2-write-data | + client_pod=mysql-client logger.go:42: 14:51:36 | scaling/2-write-data | + wait_pod mysql-client logger.go:42: 14:51:36 | scaling/2-write-data | + local pod=mysql-client logger.go:42: 14:51:36 | scaling/2-write-data | + set +o xtrace logger.go:42: 14:51:36 | scaling/2-write-data | mysql-clienttrue logger.go:42: 14:51:36 | scaling/2-write-data | + kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:36 | scaling/2-write-data | + sed -e 's/mysql: //' logger.go:42: 14:51:36 | scaling/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.' 
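
The CREATE TABLE statement above goes through the run_mysql helper sourced from e2e-tests/functions. Judging from the xtrace output, it amounts to an exec into the mysql-client pod with the password warning filtered out; a hedged reconstruction (the real helper also resolves the pod via its name=mysql-client label and waits for it to become ready):

    run_mysql() {
        local command="$1"
        local uri="$2"     # e.g. "-h scaling-haproxy -uroot -proot_password"
        kubectl -n "${NAMESPACE}" exec mysql-client -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }
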
logger.go:42: 14:51:37 | scaling/2-write-data | + : logger.go:42: 14:51:37 | scaling/2-write-data | +++ get_cluster_name logger.go:42: 14:51:37 | scaling/2-write-data | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:51:38 | scaling/2-write-data | ++ get_haproxy_svc scaling logger.go:42: 14:51:38 | scaling/2-write-data | ++ local cluster=scaling logger.go:42: 14:51:38 | scaling/2-write-data | ++ echo scaling-haproxy logger.go:42: 14:51:38 | scaling/2-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:38 | scaling/2-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)' logger.go:42: 14:51:38 | scaling/2-write-data | + local 'uri=-h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:38 | scaling/2-write-data | + local pod= logger.go:42: 14:51:38 | scaling/2-write-data | ++ get_client_pod logger.go:42: 14:51:38 | scaling/2-write-data | ++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:38 | scaling/2-write-data | + client_pod=mysql-client logger.go:42: 14:51:38 | scaling/2-write-data | + wait_pod mysql-client logger.go:42: 14:51:38 | scaling/2-write-data | + local pod=mysql-client logger.go:42: 14:51:38 | scaling/2-write-data | + set +o xtrace logger.go:42: 14:51:38 | scaling/2-write-data | mysql-clienttrue logger.go:42: 14:51:38 | scaling/2-write-data | + kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:38 | scaling/2-write-data | + sed -e 's/mysql: //' logger.go:42: 14:51:38 | scaling/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 14:51:40 | scaling/2-write-data | + : logger.go:42: 14:51:41 | scaling/2-write-data | test step completed 2-write-data logger.go:42: 14:51:41 | scaling/3-read-from-primary | starting test step 3-read-from-primary logger.go:42: 14:51:41 | scaling/3-read-from-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"] logger.go:42: 14:51:41 | scaling/3-read-from-primary | + source ../../functions logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ realpath ../../.. 
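
Step 3-read-from-primary, whose script appears just above, resolves the cluster name from the ps custom resource, queries through the HAProxy service, and stores the result in a ConfigMap for the kuttl assert to compare. A sketch of that flow, assuming the run_mysql reconstruction shown earlier:

    cluster=$(kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}')   # get_cluster_name
    haproxy_svc="${cluster}-haproxy"                                                      # get_haproxy_svc
    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${haproxy_svc} -uroot -proot_password")
    kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"
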
logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++++ pwd logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ test_name=scaling logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ GIT_BRANCH=PR-523 logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export VERSION=PR-523-f00253e logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ VERSION=PR-523-f00253e logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export 
IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++++ which gdate logger.go:42: 14:51:41 | scaling/3-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++++ which date logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ date=/usr/bin/date logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ command -v oc logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ kubectl get nodes logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ grep '^minikube' logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++++ get_cluster_name logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ get_haproxy_svc scaling logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ local cluster=scaling logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ echo scaling-haproxy logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ local 'uri=-h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:41 | scaling/3-read-from-primary | ++ local pod= logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ get_client_pod logger.go:42: 14:51:41 | scaling/3-read-from-primary | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:42 | scaling/3-read-from-primary | ++ client_pod=mysql-client logger.go:42: 14:51:42 | scaling/3-read-from-primary | ++ wait_pod mysql-client logger.go:42: 14:51:42 | scaling/3-read-from-primary | ++ local pod=mysql-client logger.go:42: 14:51:42 | scaling/3-read-from-primary | ++ set +o xtrace logger.go:42: 14:51:42 | scaling/3-read-from-primary | mysql-clienttrue logger.go:42: 14:51:42 | scaling/3-read-from-primary | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-haproxy -uroot -proot_password' logger.go:42: 14:51:42 | scaling/3-read-from-primary | ++ sed -e 's/mysql: //' logger.go:42: 
14:51:42 | scaling/3-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 14:51:43 | scaling/3-read-from-primary | + data=100500 logger.go:42: 14:51:43 | scaling/3-read-from-primary | + kubectl create configmap -n kuttl-test-useful-sculpin 03-read-from-primary --from-literal=data=100500 logger.go:42: 14:51:44 | scaling/3-read-from-primary | configmap/03-read-from-primary created logger.go:42: 14:51:45 | scaling/3-read-from-primary | test step completed 3-read-from-primary logger.go:42: 14:51:45 | scaling/4-read-from-replicas | starting test step 4-read-from-replicas logger.go:42: 14:51:45 | scaling/4-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions args='' size=3 for i in $(seq 0 $((size - 1))); do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") args="${args} --from-literal=${host}=${data}" done kubectl create configmap -n "${NAMESPACE}" 04-read-from-replicas ${args}] logger.go:42: 14:51:45 | scaling/4-read-from-replicas | + source ../../functions logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ realpath ../../.. logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++++ pwd logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++ test_name=scaling logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ GIT_BRANCH=PR-523 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export VERSION=PR-523-f00253e logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ VERSION=PR-523-f00253e logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export 
IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++++ which gdate logger.go:42: 14:51:45 | scaling/4-read-from-replicas | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++++ which date logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ date=/usr/bin/date logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ command -v oc logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ kubectl get nodes logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ grep '^minikube' logger.go:42: 14:51:45 | scaling/4-read-from-replicas | + args= logger.go:42: 14:51:45 | scaling/4-read-from-replicas | + size=3 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | ++ seq 0 2 logger.go:42: 14:51:45 | scaling/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ get_cluster_name logger.go:42: 14:51:45 | scaling/4-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' 
logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ get_mysql_headless_fqdn scaling 0 logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ local index=0 logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ echo scaling-mysql-0.scaling-mysql logger.go:42: 14:51:46 | scaling/4-read-from-replicas | + host=scaling-mysql-0.scaling-mysql logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-0.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ local 'uri=-h scaling-mysql-0.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ local pod= logger.go:42: 14:51:46 | scaling/4-read-from-replicas | +++ get_client_pod logger.go:42: 14:51:46 | scaling/4-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:51:46 | scaling/4-read-from-replicas | ++ set +o xtrace logger.go:42: 14:51:47 | scaling/4-read-from-replicas | mysql-clienttrue logger.go:42: 14:51:47 | scaling/4-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-0.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:47 | scaling/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:51:47 | scaling/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
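
Step 4-read-from-replicas repeats the same SELECT once per MySQL pod, addressing each one through the headless service name <cluster>-mysql-<ordinal>.<cluster>-mysql that get_mysql_headless_fqdn echoes in the trace. A condensed sketch of the loop, again assuming the run_mysql reconstruction above:

    args=''
    size=3
    for i in $(seq 0 $((size - 1))); do
        host="${cluster}-mysql-${i}.${cluster}-mysql"    # get_mysql_headless_fqdn
        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password")
        args="${args} --from-literal=${host}=${data}"    # one ConfigMap key per replica
    done
    kubectl create configmap -n "${NAMESPACE}" 04-read-from-replicas ${args}

Every replica returning 100500 below confirms that the row written through HAProxy has replicated to all three async replicas before the scale-up.
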
logger.go:42: 14:51:48 | scaling/4-read-from-replicas | + data=100500 logger.go:42: 14:51:48 | scaling/4-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500' logger.go:42: 14:51:48 | scaling/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:51:48 | scaling/4-read-from-replicas | +++ get_cluster_name logger.go:42: 14:51:48 | scaling/4-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ get_mysql_headless_fqdn scaling 1 logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ local index=1 logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ echo scaling-mysql-1.scaling-mysql logger.go:42: 14:51:48 | scaling/4-read-from-replicas | + host=scaling-mysql-1.scaling-mysql logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-1.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ local 'uri=-h scaling-mysql-1.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:48 | scaling/4-read-from-replicas | ++ local pod= logger.go:42: 14:51:48 | scaling/4-read-from-replicas | +++ get_client_pod logger.go:42: 14:51:48 | scaling/4-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ set +o xtrace logger.go:42: 14:51:49 | scaling/4-read-from-replicas | mysql-clienttrue logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-1.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:51:49 | scaling/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 14:51:50 | scaling/4-read-from-replicas | + data=100500 logger.go:42: 14:51:50 | scaling/4-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500' logger.go:42: 14:51:50 | scaling/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:51:50 | scaling/4-read-from-replicas | +++ get_cluster_name logger.go:42: 14:51:50 | scaling/4-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ get_mysql_headless_fqdn scaling 2 logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ local index=2 logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ echo scaling-mysql-2.scaling-mysql logger.go:42: 14:51:51 | scaling/4-read-from-replicas | + host=scaling-mysql-2.scaling-mysql logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-2.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ local 'uri=-h scaling-mysql-2.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ local pod= logger.go:42: 14:51:51 | scaling/4-read-from-replicas | +++ get_client_pod logger.go:42: 14:51:51 | scaling/4-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ set +o xtrace logger.go:42: 14:51:51 | scaling/4-read-from-replicas | mysql-clienttrue logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-2.scaling-mysql -uroot -proot_password' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:51:51 | scaling/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 14:51:53 | scaling/4-read-from-replicas | + data=100500 logger.go:42: 14:51:53 | scaling/4-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500 --from-literal=scaling-mysql-2.scaling-mysql=100500' logger.go:42: 14:51:53 | scaling/4-read-from-replicas | + kubectl create configmap -n kuttl-test-useful-sculpin 04-read-from-replicas --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500 --from-literal=scaling-mysql-2.scaling-mysql=100500 logger.go:42: 14:51:53 | scaling/4-read-from-replicas | configmap/04-read-from-replicas created logger.go:42: 14:51:54 | scaling/4-read-from-replicas | test step completed 4-read-from-replicas logger.go:42: 14:51:54 | scaling/5-check-orchestrator | starting test step 5-check-orchestrator logger.go:42: 14:51:54 | scaling/5-check-orchestrator | running command: [sh -c set -o errexit set -o xtrace source ../../functions orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0) cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0] | sed "s/.${NAMESPACE}//g") args="--from-literal=cluster=${cluster}" run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" | jq -r .[].Key.Hostname | sed "s/.${NAMESPACE}//g" >"${TEMP_DIR}/instances" args="${args} --from-file=instances=${TEMP_DIR}/instances" kubectl create configmap -n "${NAMESPACE}" 05-check-orchestrator ${args}] logger.go:42: 14:51:54 | scaling/5-check-orchestrator | + source ../../functions logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ realpath ../../.. logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++++ pwd logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++ test_name=scaling logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ 
export GIT_BRANCH=PR-523 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ GIT_BRANCH=PR-523 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export VERSION=PR-523-f00253e logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ VERSION=PR-523-f00253e logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++++ which gdate logger.go:42: 14:51:54 | scaling/5-check-orchestrator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:51:54 | scaling/5-check-orchestrator | ++++ which date logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ date=/usr/bin/date logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ command -v oc logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ kubectl get nodes logger.go:42: 14:51:54 | scaling/5-check-orchestrator | +++ grep '^minikube' logger.go:42: 14:51:55 | scaling/5-check-orchestrator | +++ get_cluster_name logger.go:42: 14:51:55 | scaling/5-check-orchestrator | +++ kubectl -n kuttl-test-useful-sculpin get 
ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ get_orc_headless_fqdn scaling 0 logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ local cluster=scaling logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ local index=0 logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ echo scaling-orc-0.scaling-orc logger.go:42: 14:51:55 | scaling/5-check-orchestrator | + orc_host=scaling-orc-0.scaling-orc logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ run_curl http://scaling-orc-0.scaling-orc:3000/api/clusters/ logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'curl -s -k http://scaling-orc-0.scaling-orc:3000/api/clusters/' logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ jq -r '.[0]' logger.go:42: 14:51:55 | scaling/5-check-orchestrator | ++ sed s/.kuttl-test-useful-sculpin//g logger.go:42: 14:51:56 | scaling/5-check-orchestrator | + cluster=scaling-mysql-0.scaling-mysql:3306 logger.go:42: 14:51:56 | scaling/5-check-orchestrator | + args=--from-literal=cluster=scaling-mysql-0.scaling-mysql:3306 logger.go:42: 14:51:56 | scaling/5-check-orchestrator | + run_curl http://scaling-orc-0.scaling-orc:3000/api/cluster/scaling-mysql-0.scaling-mysql:3306/ logger.go:42: 14:51:56 | scaling/5-check-orchestrator | + kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'curl -s -k http://scaling-orc-0.scaling-orc:3000/api/cluster/scaling-mysql-0.scaling-mysql:3306/' logger.go:42: 14:51:56 | scaling/5-check-orchestrator | + jq -r '.[].Key.Hostname' logger.go:42: 14:51:56 | scaling/5-check-orchestrator | + sed s/.kuttl-test-useful-sculpin//g logger.go:42: 14:51:57 | scaling/5-check-orchestrator | + args='--from-literal=cluster=scaling-mysql-0.scaling-mysql:3306 --from-file=instances=/tmp/kuttl/ps/scaling/instances' logger.go:42: 14:51:57 | scaling/5-check-orchestrator | + kubectl create configmap -n kuttl-test-useful-sculpin 05-check-orchestrator --from-literal=cluster=scaling-mysql-0.scaling-mysql:3306 --from-file=instances=/tmp/kuttl/ps/scaling/instances logger.go:42: 14:51:58 | scaling/5-check-orchestrator | configmap/05-check-orchestrator created logger.go:42: 14:51:59 | scaling/5-check-orchestrator | test step completed 5-check-orchestrator logger.go:42: 14:51:59 | scaling/6-scale-up | starting test step 6-scale-up logger.go:42: 14:51:59 | scaling/6-scale-up | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.mysql.size=5' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=5' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 14:51:59 | scaling/6-scale-up | + source ../../functions logger.go:42: 14:51:59 | scaling/6-scale-up | +++ realpath ../../.. 
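For reference, the 5-check-orchestrator step that completes above boils down to querying the orchestrator REST API from inside the mysql-client pod and recording the answers in a ConfigMap for the kuttl assert to compare against. A minimal standalone sketch of the same check follows; the namespace, pod, service and ConfigMap names are the ones from this run, the shell variable names are the editor's own, and the instances file is written to /tmp/instances here rather than the test's TEMP_DIR:

    NS=kuttl-test-useful-sculpin
    ORC_HOST=scaling-orc-0.scaling-orc    # first orchestrator pod via its headless service

    # first cluster alias orchestrator reports, with the namespace suffix stripped
    CLUSTER=$(kubectl -n "$NS" exec mysql-client -- \
        bash -c "curl -s -k http://$ORC_HOST:3000/api/clusters/" \
      | jq -r '.[0]' | sed "s/.$NS//g")

    # hostnames of every instance orchestrator tracks for that cluster
    kubectl -n "$NS" exec mysql-client -- \
        bash -c "curl -s -k http://$ORC_HOST:3000/api/cluster/$CLUSTER/" \
      | jq -r '.[].Key.Hostname' | sed "s/.$NS//g" > /tmp/instances

    kubectl create configmap -n "$NS" 05-check-orchestrator \
        --from-literal=cluster="$CLUSTER" --from-file=instances=/tmp/instances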
logger.go:42: 14:51:59 | scaling/6-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:59 | scaling/6-scale-up | ++++ pwd logger.go:42: 14:51:59 | scaling/6-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:51:59 | scaling/6-scale-up | ++ test_name=scaling logger.go:42: 14:51:59 | scaling/6-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:59 | scaling/6-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:59 | scaling/6-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:59 | scaling/6-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:59 | scaling/6-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:59 | scaling/6-scale-up | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:51:59 | scaling/6-scale-up | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:51:59 | scaling/6-scale-up | +++ GIT_BRANCH=PR-523 logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export VERSION=PR-523-f00253e logger.go:42: 14:51:59 | scaling/6-scale-up | +++ VERSION=PR-523-f00253e logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export 
IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:59 | scaling/6-scale-up | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:59 | scaling/6-scale-up | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:59 | scaling/6-scale-up | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:59 | scaling/6-scale-up | ++++ which gdate logger.go:42: 14:51:59 | scaling/6-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:51:59 | scaling/6-scale-up | ++++ which date logger.go:42: 14:51:59 | scaling/6-scale-up | +++ date=/usr/bin/date logger.go:42: 14:51:59 | scaling/6-scale-up | +++ command -v oc logger.go:42: 14:51:59 | scaling/6-scale-up | +++ kubectl get nodes logger.go:42: 14:51:59 | scaling/6-scale-up | +++ grep '^minikube' logger.go:42: 14:51:59 | scaling/6-scale-up | + get_cr logger.go:42: 14:51:59 | scaling/6-scale-up | + local name_suffix= logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval .spec.mysql.size=5 - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval .spec.orchestrator.size=5 - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 14:51:59 | scaling/6-scale-up | + kubectl -n kuttl-test-useful-sculpin apply -f - logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.metadata.name="%s"' scaling logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.metadata.name="scaling"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/cr.yaml logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + '[' -n '' ']' logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 14:51:59 | 
scaling/6-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 14:51:59 | scaling/6-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:59 | scaling/6-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-523-f00253e"' - logger.go:42: 14:52:01 | scaling/6-scale-up | perconaservermysql.ps.percona.com/scaling configured logger.go:42: 14:54:21 | scaling/6-scale-up | test step completed 6-scale-up logger.go:42: 14:54:21 | scaling/7-read-from-replicas | starting test step 7-read-from-replicas logger.go:42: 14:54:21 | scaling/7-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions args='' size=5 for i in $(seq 0 $((size - 1))); do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") args="${args} --from-literal=${host}=${data}" done kubectl create configmap -n "${NAMESPACE}" 07-read-from-replicas ${args}] logger.go:42: 14:54:21 | scaling/7-read-from-replicas | + source ../../functions logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ realpath ../../.. 
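For reference, the 6-scale-up step traced above is the base custom resource piped through a chain of yq overrides and applied. Reduced to the fields that matter for scaling (the image, secret and clusterType overrides visible in the trace are left out of this sketch), the operation is roughly:

    NS=kuttl-test-useful-sculpin

    yq eval '.metadata.name="scaling"' deploy/cr.yaml \
      | yq eval '.spec.mysql.size=5' - \
      | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
      | yq eval '.spec.orchestrator.enabled=true' - \
      | yq eval '.spec.orchestrator.size=5' - \
      | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
      | kubectl -n "$NS" apply -f -

Once the object is configured (14:52:01 in the trace), the operator grows the scaling-mysql and scaling-orc StatefulSets to five replicas each, and the step is marked completed at 14:54:21 once the kuttl asserts for the new pods are satisfied.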
logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++++ pwd logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++ test_name=scaling logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ GIT_BRANCH=PR-523 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export VERSION=PR-523-f00253e logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ VERSION=PR-523-f00253e logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:54:21 | 
scaling/7-read-from-replicas | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++++ which gdate logger.go:42: 14:54:21 | scaling/7-read-from-replicas | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:54:21 | scaling/7-read-from-replicas | ++++ which date logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ date=/usr/bin/date logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ command -v oc logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ kubectl get nodes logger.go:42: 14:54:21 | scaling/7-read-from-replicas | +++ grep '^minikube' logger.go:42: 14:54:22 | scaling/7-read-from-replicas | + args= logger.go:42: 14:54:22 | scaling/7-read-from-replicas | + size=5 logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ seq 0 4 logger.go:42: 14:54:22 | scaling/7-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:54:22 | scaling/7-read-from-replicas | +++ get_cluster_name logger.go:42: 14:54:22 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ get_mysql_headless_fqdn scaling 0 logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ local index=0 logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ echo scaling-mysql-0.scaling-mysql logger.go:42: 14:54:22 | scaling/7-read-from-replicas | + host=scaling-mysql-0.scaling-mysql logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-0.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ local 'uri=-h scaling-mysql-0.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:22 | scaling/7-read-from-replicas | ++ local pod= logger.go:42: 14:54:22 | scaling/7-read-from-replicas | +++ get_client_pod logger.go:42: 14:54:22 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ 
wait_pod mysql-client logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ set +o xtrace logger.go:42: 14:54:23 | scaling/7-read-from-replicas | mysql-clienttrue logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-0.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:54:23 | scaling/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 14:54:24 | scaling/7-read-from-replicas | + data=100500 logger.go:42: 14:54:24 | scaling/7-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500' logger.go:42: 14:54:24 | scaling/7-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:54:24 | scaling/7-read-from-replicas | +++ get_cluster_name logger.go:42: 14:54:24 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ get_mysql_headless_fqdn scaling 1 logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ local index=1 logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ echo scaling-mysql-1.scaling-mysql logger.go:42: 14:54:25 | scaling/7-read-from-replicas | + host=scaling-mysql-1.scaling-mysql logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-1.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ local 'uri=-h scaling-mysql-1.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ local pod= logger.go:42: 14:54:25 | scaling/7-read-from-replicas | +++ get_client_pod logger.go:42: 14:54:25 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:54:25 | scaling/7-read-from-replicas | ++ set +o xtrace logger.go:42: 14:54:26 | scaling/7-read-from-replicas | mysql-clienttrue logger.go:42: 14:54:26 | scaling/7-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-1.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:26 | scaling/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:54:26 | scaling/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 14:54:27 | scaling/7-read-from-replicas | + data=100500 logger.go:42: 14:54:27 | scaling/7-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500' logger.go:42: 14:54:27 | scaling/7-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:54:27 | scaling/7-read-from-replicas | +++ get_cluster_name logger.go:42: 14:54:27 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ get_mysql_headless_fqdn scaling 2 logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ local index=2 logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ echo scaling-mysql-2.scaling-mysql logger.go:42: 14:54:27 | scaling/7-read-from-replicas | + host=scaling-mysql-2.scaling-mysql logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-2.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ local 'uri=-h scaling-mysql-2.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:27 | scaling/7-read-from-replicas | ++ local pod= logger.go:42: 14:54:27 | scaling/7-read-from-replicas | +++ get_client_pod logger.go:42: 14:54:27 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ set +o xtrace logger.go:42: 14:54:28 | scaling/7-read-from-replicas | mysql-clienttrue logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-2.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:54:28 | scaling/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 14:54:29 | scaling/7-read-from-replicas | + data=100500 logger.go:42: 14:54:29 | scaling/7-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500 --from-literal=scaling-mysql-2.scaling-mysql=100500' logger.go:42: 14:54:29 | scaling/7-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:54:29 | scaling/7-read-from-replicas | +++ get_cluster_name logger.go:42: 14:54:29 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ get_mysql_headless_fqdn scaling 3 logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ local index=3 logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ echo scaling-mysql-3.scaling-mysql logger.go:42: 14:54:30 | scaling/7-read-from-replicas | + host=scaling-mysql-3.scaling-mysql logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-3.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ local 'uri=-h scaling-mysql-3.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ local pod= logger.go:42: 14:54:30 | scaling/7-read-from-replicas | +++ get_client_pod logger.go:42: 14:54:30 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:54:30 | scaling/7-read-from-replicas | ++ set +o xtrace logger.go:42: 14:54:31 | scaling/7-read-from-replicas | mysql-clienttrue logger.go:42: 14:54:31 | scaling/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:54:31 | scaling/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 14:54:31 | scaling/7-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-3.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | + data=100500 logger.go:42: 14:54:32 | scaling/7-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500 --from-literal=scaling-mysql-2.scaling-mysql=100500 --from-literal=scaling-mysql-3.scaling-mysql=100500' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | +++ get_cluster_name logger.go:42: 14:54:32 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ get_mysql_headless_fqdn scaling 4 logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ local cluster=scaling logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ local index=4 logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ echo scaling-mysql-4.scaling-mysql logger.go:42: 14:54:32 | scaling/7-read-from-replicas | + host=scaling-mysql-4.scaling-mysql logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h scaling-mysql-4.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ local 'uri=-h scaling-mysql-4.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:32 | scaling/7-read-from-replicas | ++ local pod= logger.go:42: 14:54:32 | scaling/7-read-from-replicas | +++ get_client_pod logger.go:42: 14:54:32 | scaling/7-read-from-replicas | +++ kubectl -n kuttl-test-useful-sculpin get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ local pod=mysql-client logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ set +o xtrace logger.go:42: 14:54:33 | scaling/7-read-from-replicas | mysql-clienttrue logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 14:54:33 | scaling/7-read-from-replicas | ++ kubectl -n kuttl-test-useful-sculpin exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h scaling-mysql-4.scaling-mysql -uroot -proot_password' logger.go:42: 14:54:34 | scaling/7-read-from-replicas | + data=100500 logger.go:42: 14:54:34 | scaling/7-read-from-replicas | + args=' --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500 --from-literal=scaling-mysql-2.scaling-mysql=100500 --from-literal=scaling-mysql-3.scaling-mysql=100500 --from-literal=scaling-mysql-4.scaling-mysql=100500' logger.go:42: 14:54:34 | scaling/7-read-from-replicas | + kubectl create configmap -n kuttl-test-useful-sculpin 07-read-from-replicas --from-literal=scaling-mysql-0.scaling-mysql=100500 --from-literal=scaling-mysql-1.scaling-mysql=100500 --from-literal=scaling-mysql-2.scaling-mysql=100500 --from-literal=scaling-mysql-3.scaling-mysql=100500 --from-literal=scaling-mysql-4.scaling-mysql=100500 logger.go:42: 14:54:35 | scaling/7-read-from-replicas | configmap/07-read-from-replicas created logger.go:42: 14:54:36 | scaling/7-read-from-replicas | test step completed 7-read-from-replicas logger.go:42: 14:54:36 | scaling/8-scale-down | starting test step 8-scale-down logger.go:42: 14:54:36 | scaling/8-scale-down | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 14:54:36 | scaling/8-scale-down | + source ../../functions logger.go:42: 14:54:36 | scaling/8-scale-down | +++ realpath ../../.. 
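For reference, the 7-read-from-replicas step traced above runs the same SELECT against every MySQL pod through the headless service and folds the results into a single ConfigMap; each replica is expected to return the 100500 row seen in the trace. A slightly simplified standalone sketch (the test's run_mysql helper pipes the query via printf; here it is replaced by mysql -e, and variable names are the editor's own):

    NS=kuttl-test-useful-sculpin
    SIZE=5
    ARGS=""
    for i in $(seq 0 $((SIZE - 1))); do
        HOST="scaling-mysql-${i}.scaling-mysql"   # per-pod DNS name from the headless service
        DATA=$(kubectl -n "$NS" exec mysql-client -- \
            mysql -sN -h "$HOST" -uroot -proot_password \
            -e 'SELECT * FROM myDB.myTable' 2>/dev/null)
        ARGS="$ARGS --from-literal=${HOST}=${DATA}"
    done
    # ARGS is intentionally left unquoted so each --from-literal becomes its own argument
    kubectl create configmap -n "$NS" 07-read-from-replicas $ARGS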
logger.go:42: 14:54:36 | scaling/8-scale-down | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:54:36 | scaling/8-scale-down | ++++ pwd logger.go:42: 14:54:36 | scaling/8-scale-down | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/scaling logger.go:42: 14:54:36 | scaling/8-scale-down | ++ test_name=scaling logger.go:42: 14:54:36 | scaling/8-scale-down | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:54:36 | scaling/8-scale-down | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:54:36 | scaling/8-scale-down | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:54:36 | scaling/8-scale-down | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:54:36 | scaling/8-scale-down | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:54:36 | scaling/8-scale-down | +++ TEMP_DIR=/tmp/kuttl/ps/scaling logger.go:42: 14:54:36 | scaling/8-scale-down | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:54:36 | scaling/8-scale-down | +++ GIT_BRANCH=PR-523 logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export VERSION=PR-523-f00253e logger.go:42: 14:54:36 | scaling/8-scale-down | +++ VERSION=PR-523-f00253e logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:54:36 | 
scaling/8-scale-down | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:54:36 | scaling/8-scale-down | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:54:36 | scaling/8-scale-down | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:54:36 | scaling/8-scale-down | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:54:36 | scaling/8-scale-down | ++++ which gdate logger.go:42: 14:54:36 | scaling/8-scale-down | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:54:36 | scaling/8-scale-down | ++++ which date logger.go:42: 14:54:36 | scaling/8-scale-down | +++ date=/usr/bin/date logger.go:42: 14:54:36 | scaling/8-scale-down | +++ command -v oc logger.go:42: 14:54:36 | scaling/8-scale-down | +++ kubectl get nodes logger.go:42: 14:54:36 | scaling/8-scale-down | +++ grep '^minikube' logger.go:42: 14:54:37 | scaling/8-scale-down | + get_cr logger.go:42: 14:54:37 | scaling/8-scale-down | + local name_suffix= logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval .spec.mysql.size=3 - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval .spec.orchestrator.size=3 - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + kubectl -n kuttl-test-useful-sculpin apply -f - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + '[' -n '' ']' logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-523-f00253e"' - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 14:54:37 | 
scaling/8-scale-down | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 14:54:37 | scaling/8-scale-down | ++ printf '.metadata.name="%s"' scaling logger.go:42: 14:54:37 | scaling/8-scale-down | + yq eval '.metadata.name="scaling"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/cr.yaml logger.go:42: 14:54:38 | scaling/8-scale-down | perconaservermysql.ps.percona.com/scaling configured logger.go:42: 14:55:53 | scaling/8-scale-down | test step completed 8-scale-down logger.go:42: 14:55:53 | scaling/99-drop-finalizer | starting test step 99-drop-finalizer logger.go:42: 14:55:54 | scaling/99-drop-finalizer | PerconaServerMySQL:kuttl-test-useful-sculpin/scaling updated logger.go:42: 14:55:54 | scaling/99-drop-finalizer | test step completed 99-drop-finalizer logger.go:42: 14:55:55 | scaling | scaling events from ns kuttl-test-useful-sculpin: logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:57 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-wrphm Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/percona-server-mysql-operator-b4c599bbb-wrphm to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:57 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-b4c599bbb SuccessfulCreate Created pod: percona-server-mysql-operator-b4c599bbb-wrphm replicaset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:57 +0000 UTC Normal Deployment.apps percona-server-mysql-operator ScalingReplicaSet Scaled up replica set percona-server-mysql-operator-b4c599bbb to 1 deployment-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:59 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-b4c599bbb-wrphm_57b024ed-ef5e-4fb7-a092-50926c0d03c9 became leader percona-server-mysql-operator-b4c599bbb-wrphm_57b024ed-ef5e-4fb7-a092-50926c0d03c9 logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:59 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/mysql-client to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:59 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-wrphm.spec.containers{manager} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:59 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-wrphm.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 99.711619ms (99.727118ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:59 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-wrphm.spec.containers{manager} Created Created container manager kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:47:59 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-wrphm.spec.containers{manager} Started Started container manager kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:00 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:00 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:00 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-useful-sculpin/datadir-scaling-mysql-0" pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-0 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Claim datadir-scaling-mysql-0 Pod scaling-mysql-0 in StatefulSet scaling-mysql success statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Pod scaling-mysql-0 in StatefulSet scaling-mysql successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal Pod scaling-orc-0 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-orc-0 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:14 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulCreate create Pod scaling-orc-0 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:15 +0000 UTC Normal Pod scaling-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:15 +0000 UTC Normal Pod scaling-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 90.674723ms (90.692266ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 
14:48:15 +0000 UTC Normal Pod scaling-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:15 +0000 UTC Normal Pod scaling-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 82.707556ms (82.727937ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 70.764104ms (70.782511ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:17 +0000 UTC Normal Pod scaling-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:18 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-7cc8c8ad-e56d-4fd7-93f9-e12ebd441113 pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:18 +0000 UTC Normal Pod scaling-mysql-0 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-mysql-0 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-vcbp default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:22 +0000 UTC Normal Pod scaling-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-7cc8c8ad-e56d-4fd7-93f9-e12ebd441113" attachdetach-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:29 +0000 UTC Normal Pod scaling-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:30 +0000 UTC Normal Pod scaling-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 96.082733ms (96.094904ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:30 +0000 UTC Normal Pod scaling-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:30 +0000 UTC Normal Pod scaling-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod 
scaling-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 76.717295ms (76.733009ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 72.636783ms (72.64407ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 138.848573ms (138.854955ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:31 +0000 UTC Normal Pod scaling-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:49 +0000 UTC Normal Pod scaling-orc-1 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-orc-1 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:49 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulCreate create Pod scaling-orc-1 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:50 +0000 UTC Normal Pod scaling-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:50 +0000 UTC Normal Pod scaling-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 82.761575ms (82.770955ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:50 +0000 UTC Normal Pod scaling-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:50 +0000 UTC Normal Pod scaling-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet 
logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 76.483539ms (76.497453ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 88.323233ms (88.336102ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:48:52 +0000 UTC Normal Pod scaling-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:04 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:04 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-1 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:04 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-useful-sculpin/datadir-scaling-mysql-1" pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:04 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Claim datadir-scaling-mysql-1 Pod scaling-mysql-1 in StatefulSet scaling-mysql success statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:04 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Pod scaling-mysql-1 in StatefulSet scaling-mysql successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:08 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-d8566d29-3a4d-466e-9a05-0f9ebc8a51a3 pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:08 +0000 UTC Normal Pod scaling-mysql-1 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-mysql-1 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-3w2c default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:10 +0000 UTC Normal Pod scaling-haproxy-0 Binding Scheduled 
Successfully assigned kuttl-test-useful-sculpin/scaling-haproxy-0 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:10 +0000 UTC Normal StatefulSet.apps scaling-haproxy SuccessfulCreate create Pod scaling-haproxy-0 in StatefulSet scaling-haproxy successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:11 +0000 UTC Normal Pod scaling-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:11 +0000 UTC Normal Pod scaling-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 101.505509ms (101.523727ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:11 +0000 UTC Normal Pod scaling-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:11 +0000 UTC Normal Pod scaling-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 79.862232ms (79.869485ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 77.434566ms (77.441878ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:13 +0000 UTC Normal Pod scaling-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:14 +0000 UTC Normal Pod scaling-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-haproxy-1 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-vcbp default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:14 +0000 UTC Normal StatefulSet.apps scaling-haproxy SuccessfulCreate create Pod scaling-haproxy-1 in StatefulSet scaling-haproxy successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:15 +0000 UTC Normal Pod scaling-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:15 +0000 UTC 
Normal Pod scaling-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 99.291234ms (99.309373ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:15 +0000 UTC Normal Pod scaling-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:15 +0000 UTC Normal Pod scaling-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:16 +0000 UTC Normal Pod scaling-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d8566d29-3a4d-466e-9a05-0f9ebc8a51a3" attachdetach-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 71.140553ms (71.152313ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 100.164238ms (100.172562ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 103.273821ms (103.289858ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:17 +0000 UTC Normal Pod scaling-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:18 +0000 UTC Normal Pod scaling-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-haproxy-2 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-3w2c default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:18 +0000 UTC Normal StatefulSet.apps 
scaling-haproxy SuccessfulCreate create Pod scaling-haproxy-2 in StatefulSet scaling-haproxy successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 84.921343ms (85.014104ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 74.868356ms (78.645358ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 96.732771ms (96.741281ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:19 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:20 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 99.345045ms (99.352905ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:20 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:20 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet 
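
The entries up to this point cover the initial bring-up of the cluster under test: the statefulset-controller creates the scaling-mysql, scaling-orc and scaling-haproxy pods, the CSI provisioner binds a PVC per mysql pod, and the kubelet pulls and starts each container. To follow the same sequence interactively on a live cluster, standard kubectl commands of roughly this shape can be used (the namespace is the one created for this run; this is only a sketch, not part of the test output):

  # Watch the pods coming up and the corresponding event stream.
  kubectl -n kuttl-test-useful-sculpin get pods -w
  kubectl -n kuttl-test-useful-sculpin get events --sort-by=.lastTimestamp

  # Inspect a startup-probe failure such as the ones reported below for the mysql containers.
  kubectl -n kuttl-test-useful-sculpin describe pod scaling-mysql-1
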
logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 83.400049ms (83.434903ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 80.199505ms (80.206446ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:21 +0000 UTC Normal Pod scaling-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:25 +0000 UTC Normal Pod scaling-orc-2 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-orc-2 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:25 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulCreate create Pod scaling-orc-2 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:26 +0000 UTC Normal Pod scaling-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:26 +0000 UTC Normal Pod scaling-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 121.35049ms (121.363029ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:26 +0000 UTC Normal Pod scaling-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:26 +0000 UTC Normal Pod scaling-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 78.54022ms (78.552921ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{mysql-monit} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 83.119659ms (83.127024ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:27 +0000 UTC Normal Pod scaling-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:38 +0000 UTC Warning Pod scaling-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:38 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:49:41 +0000 UTC Normal Pod scaling-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 84.56689ms (84.58497ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:12 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:12 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-2 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:12 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-useful-sculpin/datadir-scaling-mysql-2" pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:12 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Claim datadir-scaling-mysql-2 Pod scaling-mysql-2 in StatefulSet scaling-mysql success statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:12 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Pod scaling-mysql-2 in StatefulSet scaling-mysql successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:16 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-193d4782-6b67-4b25-8871-3c4e315e8b37 pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:16 +0000 UTC Normal Pod scaling-mysql-2 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-mysql-2 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:31 +0000 UTC Normal Pod scaling-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-193d4782-6b67-4b25-8871-3c4e315e8b37" attachdetach-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:32 +0000 UTC Normal Pod scaling-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:32 +0000 UTC Normal Pod scaling-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 104.972994ms (104.987677ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:32 +0000 UTC Normal Pod scaling-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:32 +0000 UTC Normal Pod scaling-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:33 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:33 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 78.289947ms (78.302625ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:33 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:33 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:33 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 154.461811ms (154.472532ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 96.062479ms (96.068914ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:34 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:53 +0000 UTC Warning Pod scaling-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:53 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:50:56 +0000 UTC Normal Pod scaling-mysql-2.spec.containers{mysql} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:main-psmysql" in 133.626014ms (133.645665ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-3 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-3 Provisioning External provisioner is provisioning volume for claim "kuttl-test-useful-sculpin/datadir-scaling-mysql-3" pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-3 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Claim datadir-scaling-mysql-3 Pod scaling-mysql-3 in StatefulSet scaling-mysql success statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Pod scaling-mysql-3 in StatefulSet scaling-mysql successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal Pod scaling-orc-3 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-orc-3 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-3w2c default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:02 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulCreate create Pod scaling-orc-3 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:03 +0000 UTC Normal Pod scaling-orc-3.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:03 +0000 UTC Normal Pod scaling-orc-3.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 117.72092ms (117.741613ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:03 +0000 UTC Normal Pod scaling-orc-3.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:03 +0000 UTC Normal Pod scaling-orc-3.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 90.581893ms (90.594148ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{mysql-monit} Pulling 
Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 101.430949ms (101.44926ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:05 +0000 UTC Normal Pod scaling-orc-3.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:06 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-3 ProvisioningSucceeded Successfully provisioned volume pvc-8a3b58f9-6a53-4f2a-9ef3-11ea7802d765 pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:07 +0000 UTC Normal Pod scaling-mysql-3 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-mysql-3 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-vcbp default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:14 +0000 UTC Normal Pod scaling-mysql-3 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-8a3b58f9-6a53-4f2a-9ef3-11ea7802d765" attachdetach-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:16 +0000 UTC Normal Pod scaling-mysql-3.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:16 +0000 UTC Normal Pod scaling-mysql-3.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 105.983217ms (106.011517ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:16 +0000 UTC Normal Pod scaling-mysql-3.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:16 +0000 UTC Normal Pod scaling-mysql-3.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:17 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:17 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 74.97478ms (74.993076ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:17 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 88.651282ms (88.659236ms including waiting) kubelet logger.go:42: 14:55:55 | scaling 
| 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 67.54409ms (67.558392ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:18 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:36 +0000 UTC Warning Pod scaling-mysql-3.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:36 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:38 +0000 UTC Normal Pod scaling-orc-4 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-orc-4 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-58cc default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:38 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulCreate create Pod scaling-orc-4 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:39 +0000 UTC Normal Pod scaling-orc-4.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:39 +0000 UTC Normal Pod scaling-orc-4.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 110.026737ms (110.058797ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:39 +0000 UTC Normal Pod scaling-orc-4.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:39 +0000 UTC Normal Pod scaling-orc-4.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:40 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 102.268954ms (102.283054ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 78.243996ms (78.256159ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{orc} Created Created 
container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 76.329818ms (76.337074ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:52:41 +0000 UTC Normal Pod scaling-orc-4.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:10 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-4 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:10 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate create Claim datadir-scaling-mysql-4 Pod scaling-mysql-4 in StatefulSet scaling-mysql success statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:11 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-4 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:11 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-4 Provisioning External provisioner is provisioning volume for claim "kuttl-test-useful-sculpin/datadir-scaling-mysql-4" pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:11 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulCreate (combined from similar events): create Pod scaling-mysql-4 in StatefulSet scaling-mysql successful logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:14 +0000 UTC Normal PersistentVolumeClaim datadir-scaling-mysql-4 ProvisioningSucceeded Successfully provisioned volume pvc-e4a582ad-6f8a-4f40-818c-225eab6a6726 pd.csi.storage.gke.io_gke-e79637b8881c4d69aadf-7f21-fd08-vm_1dfebf10-8ccc-4458-9c66-7c7a2d56f6ef logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:15 +0000 UTC Normal Pod scaling-mysql-4 Binding Scheduled Successfully assigned kuttl-test-useful-sculpin/scaling-mysql-4 to gke-jen-ps-523-f00253e-4-default-pool-2999dbe8-3w2c default-scheduler logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:23 +0000 UTC Normal Pod scaling-mysql-4 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e4a582ad-6f8a-4f40-818c-225eab6a6726" attachdetach-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:24 +0000 UTC Normal Pod scaling-mysql-4.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:24 +0000 UTC Normal Pod scaling-mysql-4.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 108.440518ms (108.475591ms including waiting) 
kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:24 +0000 UTC Normal Pod scaling-mysql-4.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:25 +0000 UTC Normal Pod scaling-mysql-4.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 77.318846ms (77.332289ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 79.180213ms (79.204813ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 79.752436ms (79.761379ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:26 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:45 +0000 UTC Warning Pod scaling-mysql-4.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:45 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:53:48 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 75.656626ms (75.678196ms including waiting) kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 
UTC Normal Pod scaling-mysql-4.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 UTC Normal Pod scaling-mysql-4.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulDelete delete Pod scaling-mysql-4 in StatefulSet scaling-mysql successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 UTC Normal Pod scaling-orc-4.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 UTC Normal Pod scaling-orc-4.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:44 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulDelete delete Pod scaling-orc-4 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:48 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:48 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:48 +0000 UTC Normal StatefulSet.apps scaling-mysql SuccessfulDelete delete Pod scaling-mysql-3 in StatefulSet scaling-mysql successful statefulset-controller logger.go:42: 14:55:55 | scaling | 2024-02-05 14:54:49 +0000 UTC Normal Pod scaling-mysql-3.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:55:15 +0000 UTC Normal Pod scaling-orc-3.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:55:15 +0000 UTC Normal Pod scaling-orc-3.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:55:55 | scaling | 2024-02-05 14:55:15 +0000 UTC Normal StatefulSet.apps scaling-orc SuccessfulDelete delete Pod scaling-orc-3 in StatefulSet scaling-orc successful statefulset-controller logger.go:42: 14:55:55 | scaling | Deleting namespace: kuttl-test-useful-sculpin === CONT kuttl harness.go:405: run tests finished harness.go:513: cleaning up harness.go:570: removing temp folder: "" --- PASS: kuttl (527.46s) --- PASS: kuttl/harness (0.00s) --- PASS: kuttl/harness/scaling (524.61s) PASS
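
Read end to end, the event dump records the scaling exercise itself: the mysql and orchestrator StatefulSets grow from three to five replicas (scaling-mysql-3/-4 and scaling-orc-3/-4 created between 14:52 and 14:53) and are then scaled back to three (the same pods deleted between 14:54 and 14:55) before the namespace is removed and the test passes. The kuttl step files that apply these changes are not part of this log; as a rough sketch, and assuming the PerconaServerMySQL custom resource is named scaling and exposes spec.mysql.size and spec.orchestrator.size as in the operator's sample cr.yaml, an equivalent manual scale-up and scale-down would look like:

  # Hypothetical scale-up to five replicas; resource and field names are assumptions.
  kubectl -n kuttl-test-useful-sculpin patch perconaservermysql scaling --type=merge \
    -p '{"spec":{"mysql":{"size":5},"orchestrator":{"size":5}}}'

  # Hypothetical scale-down back to the original three replicas.
  kubectl -n kuttl-test-useful-sculpin patch perconaservermysql scaling --type=merge \
    -p '{"spec":{"mysql":{"size":3},"orchestrator":{"size":3}}}'

To re-run only this test case with kuttl against an existing cluster, an invocation along these lines should work; the config file path is an assumption about the repository layout:

  kubectl kuttl test --config e2e-tests/kuttl-test.yaml --test scaling --timeout 180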