=== RUN   kuttl
    harness.go:459: starting setup
    harness.go:254: running tests using configured kubeconfig.
    harness.go:277: Successful connection to cluster at: https://34.31.51.163
    harness.go:362: running tests
    harness.go:74: going to run test suite with timeout of 180 seconds for each step
    harness.go:374: testsuite: e2e-tests/tests has 36 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/async-upgrade
=== PAUSE kuttl/harness/async-upgrade
=== CONT  kuttl/harness/async-upgrade
logger.go:42: 14:45:00 | async-upgrade | Creating namespace: kuttl-test-causal-koala
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
git_tag="v$(latest_operator_version_in_vs)"
deploy_operator_gh ${git_tag}
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | + source ../../functions
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ realpath ../../..
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++++ pwd
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/tests/async-upgrade
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++ test_name=async-upgrade
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/async-upgrade
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/async-upgrade
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export GIT_BRANCH=PR-918
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ GIT_BRANCH=PR-918
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export VERSION=PR-918-24aa4dce
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ VERSION=PR-918-24aa4dce
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++++ dirname /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++++ /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/../pkg/version/version.txt
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh: line 12: realpath: No such file or directory
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export OPERATOR_VERSION=
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ OPERATOR_VERSION=
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++++ which gdate
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-918/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++++ which date
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ oc get projects
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ :
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++ oc get projects
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | + init_temp_dir
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | + rm -rf /tmp/kuttl/ps/async-upgrade
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/async-upgrade
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | ++ latest_operator_version_in_vs
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ curl -s https://check.percona.com/versions/v1/ps-operator
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ jq -r '.versions[].operator'
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ sort -V
logger.go:42: 14:45:01 | async-upgrade/0-deploy-operator | +++ tail -n1
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | ++ local latest=0.10.0
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | ++ [[ 0.10.0 == '' ]]
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | ++ echo 0.10.0
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | + git_tag=v0.10.0
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | + deploy_operator_gh v0.10.0
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | + local git_tag=v0.10.0
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | + echo 'applying v0.10.0/deploy/crd.yaml'
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | applying v0.10.0/deploy/crd.yaml
logger.go:42: 14:45:02 | async-upgrade/0-deploy-operator | + kubectl apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-server-mysql-operator/v0.10.0/deploy/crd.yaml
logger.go:42: 14:45:03 | async-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 14:45:03 | async-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + local rbac=rbac
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + local operator=operator
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + rbac=cw-rbac
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + operator=cw-operator
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 14:45:04 | async-upgrade/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 14:45:05 | async-upgrade/0-deploy-operator | namespace "ps-operator" deleted
logger.go:42: 14:45:13 | async-upgrade/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 14:45:13 | async-upgrade/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 14:45:14 | async-upgrade/0-deploy-operator | namespace/ps-operator created
logger.go:42: 14:45:14 | async-upgrade/0-deploy-operator | + echo 'applying v0.10.0/deploy/cw-rbac.yaml'
logger.go:42: 14:45:14 | async-upgrade/0-deploy-operator | applying v0.10.0/deploy/cw-rbac.yaml
logger.go:42: 14:45:14 | async-upgrade/0-deploy-operator | + kubectl apply -n ps-operator -f https://raw.githubusercontent.com/percona/percona-server-mysql-operator/v0.10.0/deploy/cw-rbac.yaml
logger.go:42: 14:45:15 | async-upgrade/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | + echo 'applying v0.10.0/deploy/cw-operator.yaml'
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | applying v0.10.0/deploy/cw-operator.yaml
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | + curl -s https://raw.githubusercontent.com/percona/percona-server-mysql-operator/v0.10.0/deploy/cw-operator.yaml
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 14:45:17 | async-upgrade/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 14:45:18 | async-upgrade/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 14:45:19 | async-upgrade/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 14:45:19 | async-upgrade/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 14:45:19 | async-upgrade/0-deploy-operator | + kubectl -n kuttl-test-causal-koala apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf/secrets.yaml
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | secret/test-secrets created
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | + kubectl -n kuttl-test-causal-koala apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | secret/test-ssl created
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | + deploy_client
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | + kubectl -n kuttl-test-causal-koala apply -f -
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:45:20 | async-upgrade/0-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql"' /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf/client.yaml
logger.go:42: 14:45:22 | async-upgrade/0-deploy-operator | pod/mysql-client created
logger.go:42: 14:45:22 | async-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:45:22 | async-upgrade/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:45:22 | async-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:45:24 | async-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:45:24 | async-upgrade/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:45:24 | async-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:45:25 | async-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:45:25 | async-upgrade/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:45:26 | async-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:45:27 | async-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:45:27 | async-upgrade/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:45:28 | async-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:45:29 | async-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:45:29 | async-upgrade/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:45:29 | async-upgrade/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:45:30 | async-upgrade/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:45:30 | async-upgrade/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:45:31 | async-upgrade/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 14:45:31 | async-upgrade/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 14:45:31 | async-upgrade/0-deploy-operator | percona-server-mysql-operator  ps-operator  1
logger.go:42: 14:45:31 | async-upgrade/0-deploy-operator | ASSERT PASS
logger.go:42: 14:45:31 | async-upgrade/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
version=$(latest_operator_version_in_vs)
init_image=$(get_operator_image)
if [[ -z ${init_image} ]]; then
  echo "failed to get operator image"
  exit 1
fi
get_cr_with_latest_versions_in_vs \
  | yq eval "$(printf '.spec.initImage="%s"' "${init_image}")" - \
  | yq eval "$(printf '.spec.crVersion="%s"' "${version}")" - \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | + source ../../functions
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ realpath ../../..
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++++ pwd
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/tests/async-upgrade
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++ test_name=async-upgrade
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/async-upgrade
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/async-upgrade
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export GIT_BRANCH=PR-918
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ GIT_BRANCH=PR-918
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export VERSION=PR-918-24aa4dce
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ VERSION=PR-918-24aa4dce
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++++ dirname /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++++ /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/../pkg/version/version.txt
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh: line 12: realpath: No such file or directory
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export OPERATOR_VERSION=
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ OPERATOR_VERSION=
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++++ which gdate
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-918/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++++ which date
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ date=/usr/bin/date
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ oc get projects
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ :
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ kubectl get nodes
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ grep '^minikube'
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++ oc get projects
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | ++ latest_operator_version_in_vs
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ curl -s https://check.percona.com/versions/v1/ps-operator
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ jq -r '.versions[].operator'
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ sort -V
logger.go:42: 14:45:31 | async-upgrade/1-create-cluster | +++ tail -n1
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ local latest=0.10.0
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ [[ 0.10.0 == '' ]]
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ echo 0.10.0
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + version=0.10.0
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ get_operator_image
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].spec.containers[].image}'
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + init_image=percona/percona-server-mysql-operator:0.10.0
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + [[ -z percona/percona-server-mysql-operator:0.10.0 ]]
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + get_cr_with_latest_versions_in_vs
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ latest_operator_version_in_vs
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ printf '.spec.initImage="%s"' percona/percona-server-mysql-operator:0.10.0
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + kubectl -n kuttl-test-causal-koala apply -f -
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | ++ printf '.spec.crVersion="%s"' 0.10.0
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + yq eval '.spec.initImage="percona/percona-server-mysql-operator:0.10.0"' -
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | + yq eval '.spec.crVersion="0.10.0"' -
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | +++ curl -s https://check.percona.com/versions/v1/ps-operator
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | +++ jq -r '.versions[].operator'
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | +++ sort -V
logger.go:42: 14:45:32 | async-upgrade/1-create-cluster | +++ tail -n1
logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ local latest=0.10.0
logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ [[ 0.10.0 == '' ]]
logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo 0.10.0
logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local
version=0.10.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ curl -s https://check.percona.com/versions/v1/ps-operator/0.10.0/latest logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local 'latest_versions={"versions":[{"product":"ps-operator", "operator":"0.10.0", "matrix":{"mongod":{}, "pxc":{}, "pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0", "imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c", "imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10", "status":"recommended", "critical":false}}, "proxysql":{}, "haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14", "imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33", "imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0", "imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2", "imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33", "imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da", "imageHashArm64":"", "status":"recommended", "critical":false}}, "router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42", "imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968", "imageHashArm64":"", "status":"recommended", "critical":false}}, "orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17", "imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7", "imageHashArm64":"", "status":"recommended", "critical":false}}, "toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0", "imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.mysql[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' '"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' 
'"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' '"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_mysql=percona/percona-server:8.0.42-33 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.backup[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' '"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' '"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' 
'"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_backup=percona/percona-xtrabackup:8.0.35-33 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.orchestrator[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' '"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' '"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_orchestrator=percona/percona-orchestrator:3.2.6-17 logger.go:42: 14:45:33 | 
async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.orchestrator[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' '"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' '"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_router=percona/percona-orchestrator:3.2.6-17 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.toolkit[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' 
'"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' '"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_toolkit=percona/percona-toolkit:3.7.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.haproxy[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' '"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' 
'"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_haproxy=percona/haproxy:2.8.14 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ jq -r '.versions[].matrix.pmm[].imagePath' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ echo '{"versions":[{"product":"ps-operator",' '"operator":"0.10.0",' '"matrix":{"mongod":{},' '"pxc":{},' '"pmm":{"3.2.0":{"imagePath":"percona/pmm-client:3.2.0",' '"imageHash":"7b1d1798b6446d6c3d5e4005fd9c07be9f4be5859ac2fae908be387cf7b0f50c",' '"imageHashArm64":"1a36eb47e39dcd275c5ed62da8415c862e560933f48790bbf9b78f41cd3dfd10",' '"status":"recommended",' '"critical":false}},' '"proxysql":{},' '"haproxy":{"2.8.14":{"imagePath":"percona/haproxy:2.8.14",' '"imageHash":"6de8c402d83b88dae7403c05183fd75100774defa887c05a57ec04bc25be2305",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"backup":{"8.0.35-33":{"imagePath":"percona/percona-xtrabackup:8.0.35-33",' '"imageHash":"57518571b4663ab492bbd2dc8369fea7e8d358b8e544ea8fa1c1eda12207b8e2",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"operator":{"0.10.0":{"imagePath":"percona/percona-server-mysql-operator:0.10.0",' '"imageHash":"406cf9b929eb42a158fc05d6bbde3435d2c46c7fed0a53889d82b335334e8df2",' '"imageHashArm64":"0889abb9ef079efb164a1046393a5266cd30701fcd53c32db439a2ca93c6dceb",' '"status":"recommended",' '"critical":false}},' '"logCollector":{},' '"postgresql":{},' '"pgbackrest":{},' '"pgbackrestRepo":{},' '"pgbadger":{},' '"pgbouncer":{},' '"pxcOperator":{},' '"psmdbOperator":{},' '"pgOperatorApiserver":{},' '"pgOperatorEvent":{},' '"pgOperatorRmdata":{},' '"pgOperatorScheduler":{},' '"pgOperator":{},' '"pgOperatorDeployer":{},' '"psOperator":{},' '"mysql":{"8.0.42-33":{"imagePath":"percona/percona-server:8.0.42-33",' '"imageHash":"e30ad4bd3729f6a1ab443341a0a9ce10bbe70cb80d14e5e24a25da4bae4305da",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"router":{"8.0.42":{"imagePath":"percona/percona-mysql-router:8.0.42",' '"imageHash":"a6351fc5774086400f1d1dcf08f4f2d5975b97bc943d3dd98fb870e364066968",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"orchestrator":{"3.2.6-17":{"imagePath":"percona/percona-orchestrator:3.2.6-17",' '"imageHash":"c1871ddc6ff3eaca7bb03c3aa11db880ae02d623db1203d0858f8566f56ea5f7",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"toolkit":{"3.7.0":{"imagePath":"percona/percona-toolkit:3.7.0",' '"imageHash":"17ef2b69a97fa546d1f925c74ca09587ac215085c392761bb4d51f188baa6c0e",' '"imageHashArm64":"",' '"status":"recommended",' '"critical":false}},' '"postgis":{}}}]}' logger.go:42: 14:45:33 | 
async-upgrade/1-create-cluster | + local image_pmm_client=percona/pmm-client:3.2.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + get_cr '' percona/percona-server:8.0.42-33 percona/percona-xtrabackup:8.0.35-33 percona/percona-orchestrator:3.2.6-17 percona/percona-orchestrator:3.2.6-17 percona/percona-toolkit:3.7.0 percona/haproxy:2.8.14 percona/pmm-client:3.2.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local name_suffix= logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_mysql=percona/percona-server:8.0.42-33 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_backup=percona/percona-xtrabackup:8.0.35-33 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_orchestrator=percona/percona-orchestrator:3.2.6-17 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_router=percona/percona-orchestrator:3.2.6-17 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_toolkit=percona/percona-toolkit:3.7.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_haproxy=percona/haproxy:2.8.14 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + local image_pmm_client=percona/pmm-client:3.2.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.metadata.name="%s"' async-upgrade logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.metadata.name="async-upgrade"' /mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy/cr.yaml logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-918-24aa4dce"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.backup.image="%s"' percona/percona-xtrabackup:8.0.35-33 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.backup.image="percona/percona-xtrabackup:8.0.35-33"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.mysql.image="%s"' percona/percona-server:8.0.42-33 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.mysql.image="percona/percona-server:8.0.42-33"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' percona/percona-orchestrator:3.2.6-17 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.orchestrator.image="percona/percona-orchestrator:3.2.6-17"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + '[' -n '' ']' logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' percona/percona-orchestrator:3.2.6-17 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.proxy.router.image="percona/percona-orchestrator:3.2.6-17"' - logger.go:42: 
14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' percona/haproxy:2.8.14 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' percona/percona-toolkit:3.7.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.toolkit.image="percona/percona-toolkit:3.7.0"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="percona/haproxy:2.8.14"' - logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | ++ printf '.spec.pmm.image="%s"' percona/pmm-client:3.2.0 logger.go:42: 14:45:33 | async-upgrade/1-create-cluster | + yq eval '.spec.pmm.image="percona/pmm-client:3.2.0"' - logger.go:42: 14:45:34 | async-upgrade/1-create-cluster | perconaservermysql.ps.percona.com/async-upgrade created logger.go:42: 14:48:47 | async-upgrade/1-create-cluster | test step completed 1-create-cluster logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | starting test step 2-upgrade-operator logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions upgrade_operator_image ${IMAGE} wait_deployment percona-server-mysql-operator ${OPERATOR_NS:-$NAMESPACE}] logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | + source ../../functions logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ realpath ../../.. logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++++ pwd logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/tests/async-upgrade logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++ test_name=async-upgrade logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/async-upgrade logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ TEMP_DIR=/tmp/kuttl/ps/async-upgrade logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export GIT_BRANCH=PR-918 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ GIT_BRANCH=PR-918 
logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export VERSION=PR-918-24aa4dce logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ VERSION=PR-918-24aa4dce logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++++ dirname /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++++ /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/../pkg/version/version.txt logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh: line 12: realpath: No such file or directory logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export OPERATOR_VERSION= logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ OPERATOR_VERSION= logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ 
CERT_MANAGER_VER=1.17.2 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export MINIO_VER=5.4.0 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ MINIO_VER=5.4.0 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++++ which gdate logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-918/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | ++++ which date logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ date=/usr/bin/date logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ oc get projects logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ : logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ kubectl get nodes logger.go:42: 14:48:47 | async-upgrade/2-upgrade-operator | +++ grep '^minikube' logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | ++ oc get projects logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + upgrade_operator_image perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + local image=perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + kubectl -n ps-operator set image deployment/percona-server-mysql-operator manager=perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | deployment.apps/percona-server-mysql-operator image updated logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + wait_deployment percona-server-mysql-operator ps-operator logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + local name=percona-server-mysql-operator logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + local target_namespace=ps-operator logger.go:42: 14:48:48 | async-upgrade/2-upgrade-operator | + sleep 10 logger.go:42: 14:48:58 | async-upgrade/2-upgrade-operator | + set +o xtrace logger.go:42: 14:48:59 | async-upgrade/2-upgrade-operator | percona-server-mysql-operator logger.go:42: 14:48:59 | async-upgrade/2-upgrade-operator | test step completed 2-upgrade-operator logger.go:42: 14:48:59 | async-upgrade/3-ensure-no-rollout | starting test step 3-ensure-no-rollout logger.go:42: 14:48:59 | async-upgrade/3-ensure-no-rollout | running command: [sh -c sleep 25] logger.go:42: 14:49:26 | async-upgrade/3-ensure-no-rollout | test step completed 3-ensure-no-rollout logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | starting test step 4-upgrade-cluster logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval "$(printf '.spec.initImage="%s"' "${IMAGE}")" - \ | yq eval "$(printf '.spec.crVersion="%s"' "${OPERATOR_VERSION}")" - \ | yq eval '.spec.mysql.clusterType="async"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + source ../../functions logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ realpath ../../.. 
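As in the earlier steps, the vars.sh trace that follows fails at line 12 with "realpath: No such file or directory", so OPERATOR_VERSION stays empty and step 4 ends up applying .spec.crVersion="". A quick way to confirm what value actually landed in the applied resource (a hypothetical spot-check, not part of the test; the namespace value comes from the harness output):

  NAMESPACE=kuttl-test-causal-koala
  # Show the crVersion and initImage that were applied to the custom resource
  kubectl -n "$NAMESPACE" get perconaservermysql async-upgrade \
    -o jsonpath='{.spec.crVersion}{"\n"}{.spec.initImage}{"\n"}'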
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++++ pwd logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/tests/async-upgrade logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ test_name=async-upgrade logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-918 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/conf logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/async-upgrade logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/async-upgrade logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export GIT_BRANCH=PR-918 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ GIT_BRANCH=PR-918 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export VERSION=PR-918-24aa4dce logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ VERSION=PR-918-24aa4dce logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++++ dirname /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++++ /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/../pkg/version/version.txt logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | /mnt/jenkins/workspace/cloud-ps-operator_PR-918/e2e-tests/vars.sh: line 12: realpath: No such file or directory logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export OPERATOR_VERSION= logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ OPERATOR_VERSION= logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export 
IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export MINIO_VER=5.4.0 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ MINIO_VER=5.4.0 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++++ which gdate logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-918/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++++ which date logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ date=/usr/bin/date logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ oc get projects logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ : logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ kubectl get nodes logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | +++ grep '^minikube' logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ oc get projects logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + get_cr logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local name_suffix= logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_mysql=perconalab/percona-server-mysql-operator:main-psmysql 
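The get_cr trace that continues below is the test helper composing the custom resource with a chain of yq overrides before piping it to kubectl. Re-assembled for readability, step 4 amounts to roughly the following (a sketch drawn from the trace; the per-component image overrides are omitted and get_cr itself is not reproduced):

  # Reconstructed from the xtrace output of step 4-upgrade-cluster (path relative to the repo root)
  yq eval '.metadata.name="async-upgrade"' deploy/cr.yaml \
    | yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-918-24aa4dce"' - \
    | yq eval '.spec.crVersion=""' - \
    | yq eval '.spec.mysql.clusterType="async"' - \
    | yq eval '.spec.secretsName="test-secrets"' - \
    | yq eval '.spec.sslSecretName="test-ssl"' - \
    | yq eval '.spec.upgradeOptions.apply="disabled"' - \
    | kubectl -n kuttl-test-causal-koala apply -f -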
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_backup=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_orchestrator=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_router=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_toolkit=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_haproxy=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + local image_pmm_client=perconalab/pmm-client:3-dev-latest logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + kubectl -n kuttl-test-causal-koala apply -f - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.crVersion="%s"' '' logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-918-24aa4dce"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.crVersion=""' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-918-24aa4dce logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.metadata.name="%s"' async-upgrade logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-918-24aa4dce"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.metadata.name="async-upgrade"' /mnt/jenkins/workspace/cloud-ps-operator_PR-918/deploy/cr.yaml logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + '[' -n '' ']' logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval - logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:49:26 | 
async-upgrade/4-upgrade-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 14:49:26 | async-upgrade/4-upgrade-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 14:49:28 | async-upgrade/4-upgrade-cluster | perconaservermysql.ps.percona.com/async-upgrade configured
logger.go:42: 14:56:29 | async-upgrade/4-upgrade-cluster | test step failed 4-upgrade-cluster
case.go:396: failed in step 4-upgrade-cluster
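The asserts that failed are shown next. Outside the kuttl harness, the same wait on the updated cluster can be approximated with kubectl alone (a sketch, assuming a kubectl version that supports jsonpath-based wait conditions; the timeout roughly matches this step's 7-minute window):

  NAMESPACE=kuttl-test-causal-koala
  # Wait for the custom resource to report ready again after the SmartUpdate rollout
  kubectl -n "$NAMESPACE" wait perconaservermysql/async-upgrade \
    --for=jsonpath='{.status.state}'=ready --timeout=420s
  # Check how far the mysql StatefulSet rollout has progressed
  kubectl -n "$NAMESPACE" get statefulset async-upgrade-mysql \
    -o jsonpath='{.status.updatedReplicas}/{.status.replicas}{"\n"}'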
case.go:398: --- StatefulSet:kuttl-test-causal-koala/async-upgrade-mysql
+++ StatefulSet:kuttl-test-causal-koala/async-upgrade-mysql
@@ -1,13 +1,32 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
+  annotations:
+    percona.com/last-config-hash: caf661403532b9160c0593c8bb40a336
+  labels:
+    app.kubernetes.io/component: mysql
+    app.kubernetes.io/instance: async-upgrade
+    app.kubernetes.io/managed-by: percona-server-operator
+    app.kubernetes.io/name: percona-server
+    app.kubernetes.io/part-of: percona-server
+  managedFields: '[... elided field over 10 lines long ...]'
   name: async-upgrade-mysql
   namespace: kuttl-test-causal-koala
+  ownerReferences:
+  - apiVersion: ps.percona.com/v1alpha1
+    blockOwnerDeletion: true
+    controller: true
+    kind: PerconaServerMySQL
+    name: async-upgrade
+    uid: d47c27c3-ae59-4643-b0a8-dc2eaee3ff41
+spec: '[... elided field over 10 lines long ...]'
 status:
+  availableReplicas: 3
   collisionCount: 0
-  currentReplicas: 3
+  currentRevision: async-upgrade-mysql-c896cb7bc
   observedGeneration: 2
   readyReplicas: 3
   replicas: 3
-  updatedReplicas: 3
+  updateRevision: async-upgrade-mysql-684f748dd4
+  updatedReplicas: 2
case.go:398: resource StatefulSet:kuttl-test-causal-koala/async-upgrade-mysql: .status.updatedReplicas: value mismatch, expected: 3 != actual: 2
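The updatedReplicas mismatch above means one mysql pod never moved to the new revision within the step timeout. A hypothetical follow-up to see which pod is lagging, using the revision hashes from the diff (assuming the pods carry the same labels as the StatefulSet shown above):

  NAMESPACE=kuttl-test-causal-koala
  # currentRevision vs. updateRevision of the mysql StatefulSet
  kubectl -n "$NAMESPACE" get statefulset async-upgrade-mysql \
    -o jsonpath='{.status.currentRevision}{" -> "}{.status.updateRevision}{"\n"}'
  # controller-revision-hash of each mysql pod shows which one is still on the old revision
  kubectl -n "$NAMESPACE" get pods \
    -l app.kubernetes.io/instance=async-upgrade,app.kubernetes.io/component=mysql \
    -L controller-revision-hash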
case.go:398: --- PerconaServerMySQL:kuttl-test-causal-koala/async-upgrade
+++ PerconaServerMySQL:kuttl-test-causal-koala/async-upgrade
@@ -1,22 +1,31 @@
 apiVersion: ps.percona.com/v1alpha1
 kind: PerconaServerMySQL
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"ps.percona.com/v1alpha1","kind":"PerconaServerMySQL","metadata":{"annotations":{},"finalizers":["percona.com/delete-mysql-pods-in-order"],"name":"async-upgrade","namespace":"kuttl-test-causal-koala"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mysql-operator:main-backup","imagePullPolicy":"Always","storages":{"s3-us-west":{"s3":{"bucket":"S3-BACKUP-BUCKET-NAME-HERE","credentialsSecret":"cluster1-s3-credentials","region":"us-west-2"},"type":"s3","verifyTLS":true}}},"crVersion":"","initImage":"perconalab/percona-server-mysql-operator:PR-918-24aa4dce","mysql":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"autoRecovery":true,"clusterType":"async","image":"perconalab/percona-server-mysql-operator:main-psmysql","imagePullPolicy":"Always","resources":{"limits":{"memory":"4G"},"requests":{"memory":"2G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"2G"}}}}},"orchestrator":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":true,"image":"perconalab/percona-server-mysql-operator:main-orchestrator","imagePullPolicy":"Always","resources":{"limits":{"memory":"256M"},"requests":{"memory":"128M"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1G"}}}}},"pmm":{"enabled":false,"image":"perconalab/pmm-client:3-dev-latest","imagePullPolicy":"Always","resources":{"requests":{"cpu":"300m","memory":"150M"}},"serverHost":"monitoring-service","serverUser":"admin"},"proxy":{"haproxy":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":true,"image":"perconalab/percona-server-mysql-operator:main-haproxy","imagePullPolicy":"Always","resources":{"requests":{"cpu":"600m","memory":"1G"}},"size":3},"router":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":false,"image":"perconalab/percona-server-mysql-operator:main-router","imagePullPolicy":"Always","resources":{"limits":{"memory":"256M"},"requests":{"memory":"256M"}},"size":3}},"secretsName":"test-secrets","sslSecretName":"test-ssl","toolkit":{"image":"perconalab/percona-server-mysql-operator:main-toolkit","imagePullPolicy":"Always"},"updateStrategy":"SmartUpdate","upgradeOptions":{"apply":"disabled","versionServiceEndpoint":"https://check.percona.com"}}}
   finalizers:
   - percona.com/delete-mysql-pods-in-order
+  managedFields: '[... elided field over 10 lines long ...]'
   name: async-upgrade
   namespace: kuttl-test-causal-koala
+spec: '[... elided field over 10 lines long ...]'
 status:
+  conditions: '[... elided field over 10 lines long ...]'
   haproxy:
     ready: 3
     size: 3
     state: ready
+  host: async-upgrade-haproxy.kuttl-test-causal-koala
   mysql:
-    ready: 3
+    ready: 2
     size: 3
-    state: ready
+    state: initializing
+    version: 8.0.42-33
   orchestrator:
     ready: 3
     size: 3
     state: ready
-  state: ready
+  router: {}
+  state: initializing
case.go:398: resource PerconaServerMySQL:kuttl-test-causal-koala/async-upgrade: .status.mysql.state: value mismatch, expected: ready != actual: initializing
logger.go:42: 14:56:29 | async-upgrade | async-upgrade events from ns kuttl-test-causal-koala:
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:22 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-causal-koala/mysql-client to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:22 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "perconalab/percona-server-mysql-operator:main-psmysql" already present on machine kubelet
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:22 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:22 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:35 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulCreate create Claim datadir-async-upgrade-mysql-0 Pod async-upgrade-mysql-0 in StatefulSet async-upgrade-mysql success statefulset-controller
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:35 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulCreate create Pod async-upgrade-mysql-0 in StatefulSet async-upgrade-mysql successful statefulset-controller
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:35 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:35 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-causal-koala/datadir-async-upgrade-mysql-0" pd.csi.storage.gke.io_gke-b2b8c6443cf74b648ce5-4686-d809-vm_29dc1810-6763-48a1-a943-75675505deae
logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:35 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.
persistentvolume-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:36 +0000 UTC Normal Pod async-upgrade-orc-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-pwl2 default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:36 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:36 +0000 UTC Normal StatefulSet.apps async-upgrade-orc SuccessfulCreate create Pod async-upgrade-orc-0 in StatefulSet async-upgrade-orc successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:37 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 280ms (280ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:37 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:37 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:39 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Pulling Pulling image "percona/percona-orchestrator:3.2.6-17" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:39 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-7dac4228-2732-4029-896c-ce364ec11190 pd.csi.storage.gke.io_gke-b2b8c6443cf74b648ce5-4686-d809-vm_29dc1810-6763-48a1-a943-75675505deae logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-mysql-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-mysql-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Pulled Successfully pulled image "percona/percona-orchestrator:3.2.6-17" in 236ms (236ms including waiting). Image size: 76458459 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Pulling Pulling image "percona/percona-orchestrator:3.2.6-17" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "percona/percona-orchestrator:3.2.6-17" in 238ms (238ms including waiting). Image size: 76458459 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:48 +0000 UTC Normal Pod async-upgrade-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-7dac4228-2732-4029-896c-ce364ec11190" attachdetach-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:49 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:49 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 237ms (237ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:49 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:49 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:51 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.0.42-33" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:51 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.0.42-33" in 237ms (237ms including waiting). Image size: 436365358 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:51 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:51 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:51 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.0.35-33" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.0.35-33" in 209ms (209ms including waiting). Image size: 425838624 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "percona/percona-toolkit:3.7.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "percona/percona-toolkit:3.7.0" in 232ms (232ms including waiting). Image size: 140482088 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:45:52 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:11 +0000 UTC Normal Pod async-upgrade-orc-1 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-1 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-hq5c default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:11 +0000 UTC Normal StatefulSet.apps async-upgrade-orc SuccessfulCreate create Pod async-upgrade-orc-1 in StatefulSet async-upgrade-orc successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:12 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:12 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 256ms (256ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:12 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:12 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Pulling Pulling image "percona/percona-orchestrator:3.2.6-17" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Pulled Successfully pulled image "percona/percona-orchestrator:3.2.6-17" in 244ms (244ms including waiting). Image size: 76458459 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Pulling Pulling image "percona/percona-orchestrator:3.2.6-17" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "percona/percona-orchestrator:3.2.6-17" in 244ms (244ms including waiting). Image size: 76458459 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:14 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:15 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:24 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulCreate create Claim datadir-async-upgrade-mysql-1 Pod async-upgrade-mysql-1 in StatefulSet async-upgrade-mysql success statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:24 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulCreate create Pod async-upgrade-mysql-1 in StatefulSet async-upgrade-mysql successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:24 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:24 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-causal-koala/datadir-async-upgrade-mysql-1" pd.csi.storage.gke.io_gke-b2b8c6443cf74b648ce5-4686-d809-vm_29dc1810-6763-48a1-a943-75675505deae logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:24 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:28 +0000 UTC Normal Pod async-upgrade-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:28 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:28 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 279ms (279ms including waiting). Image size: 108503556 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:28 +0000 UTC Normal StatefulSet.apps async-upgrade-haproxy SuccessfulCreate create Pod async-upgrade-haproxy-0 in StatefulSet async-upgrade-haproxy successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:28 +0000 UTC Normal Pod async-upgrade-mysql-1 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-mysql-1 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-pwl2 default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:28 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-48780a11-a103-46e5-9bfb-6e715aa979f2 pd.csi.storage.gke.io_gke-b2b8c6443cf74b648ce5-4686-d809-vm_29dc1810-6763-48a1-a943-75675505deae logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:29 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:29 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:30 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Pulling Pulling image "percona/haproxy:2.8.14" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:30 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "percona/haproxy:2.8.14" in 238ms (238ms including waiting). Image size: 102701682 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:30 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "percona/haproxy:2.8.14" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "percona/haproxy:2.8.14" in 257ms (257ms including waiting). Image size: 102701682 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal Pod async-upgrade-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-1 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-pwl2 default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:31 +0000 UTC Normal StatefulSet.apps async-upgrade-haproxy SuccessfulCreate create Pod async-upgrade-haproxy-1 in StatefulSet async-upgrade-haproxy successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:32 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:32 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 303ms (303ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:32 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:32 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Pulling Pulling image "percona/haproxy:2.8.14" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "percona/haproxy:2.8.14" in 237ms (237ms including waiting). Image size: 102701682 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "percona/haproxy:2.8.14" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "percona/haproxy:2.8.14" in 203ms (203ms including waiting). Image size: 102701682 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:36 +0000 UTC Normal Pod async-upgrade-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-hq5c default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:36 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:36 +0000 UTC Normal StatefulSet.apps async-upgrade-haproxy SuccessfulCreate create Pod async-upgrade-haproxy-2 in StatefulSet async-upgrade-haproxy successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:36 +0000 UTC Normal Pod async-upgrade-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-48780a11-a103-46e5-9bfb-6e715aa979f2" attachdetach-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 300ms (300ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 264ms (264ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:37 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:39 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Pulling Pulling image "percona/haproxy:2.8.14" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:39 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "percona/haproxy:2.8.14" in 214ms (214ms including waiting). Image size: 102701682 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:39 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:39 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:39 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "percona/haproxy:2.8.14" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "percona/haproxy:2.8.14" in 233ms (233ms including waiting). Image size: 102701682 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.0.42-33" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.0.42-33" in 251ms (251ms including waiting). Image size: 436365358 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.0.35-33" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.0.35-33" in 213ms (213ms including waiting). Image size: 425838624 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:40 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "percona/percona-toolkit:3.7.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:41 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "percona/percona-toolkit:3.7.0" in 194ms (194ms including waiting). Image size: 140482088 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:41 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:41 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:47 +0000 UTC Normal Pod async-upgrade-orc-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:47 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:47 +0000 UTC Normal StatefulSet.apps async-upgrade-orc SuccessfulCreate create Pod async-upgrade-orc-2 in StatefulSet async-upgrade-orc successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:48 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 291ms (291ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:48 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:48 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:49 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Pulling Pulling image "percona/percona-orchestrator:3.2.6-17" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:49 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Pulled Successfully pulled image "percona/percona-orchestrator:3.2.6-17" in 253ms (253ms including waiting). Image size: 76458459 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:49 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:50 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:50 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Pulling Pulling image "percona/percona-orchestrator:3.2.6-17" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:50 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "percona/percona-orchestrator:3.2.6-17" in 248ms (248ms including waiting). Image size: 76458459 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:50 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:50 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:59 +0000 UTC Warning Pod async-upgrade-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/16 14:46:57 Waiting for MySQL ready state 2025/06/16 14:46:58 MySQL is ready 2025/06/16 14:46:58 Peers: [6236396434383737.async-upgrade-mysql-unready.kuttl-test-causal-koala 6334306430336637.async-upgrade-mysql-unready.kuttl-test-causal-koala] 2025/06/16 14:46:58 FQDN: async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:46:58 Primary: async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala Replicas: [async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala] 2025/06/16 14:46:58 lookup async-upgrade-mysql-1 [10.186.18.16] 2025/06/16 14:46:58 PodIP: 10.186.18.16 2025/06/16 14:46:58 lookup async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala [10.186.17.13] 2025/06/16 14:46:58 PrimaryIP: 10.186.17.13 2025/06/16 14:46:58 Donor: async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:46:58 Opening connection to 10.186.18.16 2025/06/16 14:46:58 Clone required: true 2025/06/16 14:46:58 Checking if a clone in progress 2025/06/16 14:46:58 Clone in progress: false 2025/06/16 14:46:58 Cloning from async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:46:59 Clone finished. Restarting container... kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:46:59 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.0.42-33" in 242ms (242ms including waiting). Image size: 436365358 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:37 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulCreate create Claim datadir-async-upgrade-mysql-2 Pod async-upgrade-mysql-2 in StatefulSet async-upgrade-mysql success statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:37 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulCreate create Pod async-upgrade-mysql-2 in StatefulSet async-upgrade-mysql successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:37 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:37 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:37 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-causal-koala/datadir-async-upgrade-mysql-2" pd.csi.storage.gke.io_gke-b2b8c6443cf74b648ce5-4686-d809-vm_29dc1810-6763-48a1-a943-75675505deae logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:41 +0000 UTC Normal Pod async-upgrade-mysql-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-mysql-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-hq5c default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:41 +0000 UTC Normal PersistentVolumeClaim datadir-async-upgrade-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-f2a1f083-e731-4255-a337-a144515008e7 pd.csi.storage.gke.io_gke-b2b8c6443cf74b648ce5-4686-d809-vm_29dc1810-6763-48a1-a943-75675505deae logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:49 +0000 UTC Normal Pod async-upgrade-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f2a1f083-e731-4255-a337-a144515008e7" attachdetach-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:50 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "percona/percona-server-mysql-operator:0.10.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:50 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "percona/percona-server-mysql-operator:0.10.0" in 273ms (273ms including waiting). Image size: 108503556 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:50 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:51 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:52 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.0.42-33" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:52 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.0.42-33" in 240ms (240ms including waiting). Image size: 436365358 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:52 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:52 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.0.35-33" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.0.35-33" in 241ms (241ms including waiting). Image size: 425838624 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "percona/percona-toolkit:3.7.0" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "percona/percona-toolkit:3.7.0" in 229ms (229ms including waiting). Image size: 140482088 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:47:53 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:48:11 +0000 UTC Warning Pod async-upgrade-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/16 14:48:10 Waiting for MySQL ready state 2025/06/16 14:48:10 MySQL is ready 2025/06/16 14:48:10 Peers: [3837396533613633.async-upgrade-mysql-unready.kuttl-test-causal-koala 6236396434383737.async-upgrade-mysql-unready.kuttl-test-causal-koala 6334306430336637.async-upgrade-mysql-unready.kuttl-test-causal-koala] 2025/06/16 14:48:10 FQDN: async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:48:10 Primary: async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala Replicas: [async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala] 2025/06/16 14:48:10 lookup async-upgrade-mysql-2 [10.186.16.21] 2025/06/16 14:48:10 PodIP: 10.186.16.21 2025/06/16 14:48:10 lookup async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala [10.186.17.13] 2025/06/16 14:48:10 PrimaryIP: 10.186.17.13 2025/06/16 14:48:10 Donor: async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:48:10 Opening connection to 10.186.16.21 2025/06/16 14:48:10 Clone required: true 2025/06/16 14:48:10 Checking if a clone in progress 2025/06/16 14:48:10 Clone in progress: false 2025/06/16 14:48:10 Cloning from async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:48:11 Clone finished. Restarting container... kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:48:11 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:48:15 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.0.42-33" in 233ms (233ms including waiting). Image size: 436365358 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:29 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:29 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:29 +0000 UTC Normal StatefulSet.apps async-upgrade-orc SuccessfulDelete delete Pod async-upgrade-orc-2 in StatefulSet async-upgrade-orc successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:30 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:30 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:30 +0000 UTC Normal StatefulSet.apps async-upgrade-haproxy SuccessfulDelete delete Pod async-upgrade-haproxy-2 in StatefulSet async-upgrade-haproxy successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:31 +0000 UTC Normal Pod async-upgrade-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-hq5c default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:31 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:31 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 225ms (225ms including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:31 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:31 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:33 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 201ms (201ms including waiting). Image size: 102724622 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 213ms (213ms including waiting). Image size: 102724622 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:34 +0000 UTC Normal Pod async-upgrade-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:35 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:35 +0000 UTC Normal Pod async-upgrade-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-1 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-pwl2 default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:35 +0000 UTC Normal StatefulSet.apps async-upgrade-haproxy SuccessfulDelete delete Pod async-upgrade-haproxy-1 in StatefulSet async-upgrade-haproxy successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:36 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:36 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 277ms (277ms including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:36 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:36 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:38 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:38 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 175ms (175ms including waiting). Image size: 102724622 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:38 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:38 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 230ms (230ms including waiting). Image size: 102724622 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal Pod async-upgrade-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:39 +0000 UTC Normal StatefulSet.apps async-upgrade-haproxy SuccessfulDelete delete Pod async-upgrade-haproxy-0 in StatefulSet async-upgrade-haproxy successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:40 +0000 UTC Normal Pod async-upgrade-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:41 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:41 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 167ms (167ms including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:41 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:41 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 133ms (133ms including waiting). Image size: 102724622 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 152ms (152ms including waiting). Image size: 102724622 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:43 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:48 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:48 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:48 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:49:53 +0000 UTC Normal Pod async-upgrade-mysql-1 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-mysql-1 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-pwl2 default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:00 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:00 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 279ms (279ms including waiting). Image size: 108815039 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:00 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:00 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:00 +0000 UTC Normal Pod async-upgrade-orc-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:00 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:01 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 144ms (144ms including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:01 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:01 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:02 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:02 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 192ms (192ms including waiting). Image size: 436538349 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 237ms (237ms including waiting). Image size: 437244390 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 264ms (265ms including waiting). Image size: 132936703 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 122ms (122ms including waiting). Image size: 72469805 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 113ms (113ms including waiting). Image size: 72469805 bytes. 
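Note: by this point the rolling update has replaced the percona/percona-server-mysql-operator:0.10.0 and percona/percona-server:8.0.42-33 images with the PR-918 build and the main-* images. A quick way to confirm what each pod is actually running, using plain kubectl against the namespace from the log (nothing operator-specific is assumed):

  # Image(s) per pod in the test namespace
  kubectl -n kuttl-test-causal-koala get pods \
    -o 'custom-columns=NAME:.metadata.name,IMAGES:.spec.containers[*].image'
  # Desired images on the mysql StatefulSet template
  kubectl -n kuttl-test-causal-koala get sts async-upgrade-mysql \
    -o jsonpath='{.spec.template.spec.containers[*].image}{"\n"}'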
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:03 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:20 +0000 UTC Warning Pod async-upgrade-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/16 14:50:19 Waiting for MySQL ready state 2025/06/16 14:50:19 MySQL is ready 2025/06/16 14:50:19 Peers: [3837396533613633.async-upgrade-mysql-unready.kuttl-test-causal-koala 6334306430336637.async-upgrade-mysql-unready.kuttl-test-causal-koala 6630663337616631.async-upgrade-mysql-unready.kuttl-test-causal-koala] 2025/06/16 14:50:19 FQDN: async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:50:19 Primary: async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala Replicas: [async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala] 2025/06/16 14:50:19 lookup async-upgrade-mysql-1 [10.186.18.18] 2025/06/16 14:50:19 PodIP: 10.186.18.18 2025/06/16 14:50:19 lookup async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala [10.186.17.13] 2025/06/16 14:50:19 PrimaryIP: 10.186.17.13 2025/06/16 14:50:19 Donor: async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:50:19 Opening connection to 10.186.18.18 2025/06/16 14:50:20 Clone required: true 2025/06/16 14:50:20 Checking if a clone in progress 2025/06/16 14:50:20 Clone in progress: false 2025/06/16 14:50:20 Cloning from async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:50:20 Clone finished. Restarting container... kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:21 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:25 +0000 UTC Normal Pod async-upgrade-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 224ms (224ms including waiting). Image size: 436538349 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:35 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:35 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:35 +0000 UTC Normal StatefulSet.apps async-upgrade-orc SuccessfulDelete delete Pod async-upgrade-orc-1 in StatefulSet async-upgrade-orc successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-haproxy-0 TaintManagerEviction Cancelling deletion of Pod kuttl-test-causal-koala/async-upgrade-haproxy-0 taint-eviction-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:41 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Killing Stopping container mysql-client kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:42 +0000 UTC Warning Pod async-upgrade-haproxy-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:42 +0000 UTC Normal Pod async-upgrade-haproxy-0 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:43 +0000 UTC Warning Pod async-upgrade-haproxy-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. 
default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:49 +0000 UTC Warning Pod async-upgrade-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/16 14:50:49 Waiting for MySQL ready state 2025/06/16 14:50:49 MySQL is ready 2025/06/16 14:50:49 Peers: [3837396533613633.async-upgrade-mysql-unready.kuttl-test-causal-koala 6334306430336637.async-upgrade-mysql-unready.kuttl-test-causal-koala 6630663337616631.async-upgrade-mysql-unready.kuttl-test-causal-koala] 2025/06/16 14:50:49 FQDN: async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:50:49 bootstrap finished in 0.012760 seconds 2025/06/16 14:50:49 bootstrap failed: select donor: connect to 6334306430336637.async-upgrade-mysql-unready.kuttl-test-causal-koala: ping DB: dial tcp 10.186.17.13:33062: connect: connection refused kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:56 +0000 UTC Normal Pod async-upgrade-mysql-0 TaintManagerEviction Cancelling deletion of Pod kuttl-test-causal-koala/async-upgrade-mysql-0 taint-eviction-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:56 +0000 UTC Normal StatefulSet.apps async-upgrade-mysql SuccessfulDelete delete Pod async-upgrade-mysql-0 in StatefulSet async-upgrade-mysql successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:57 +0000 UTC Warning Pod async-upgrade-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:57 +0000 UTC Normal Pod async-upgrade-mysql-0 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:50:58 +0000 UTC Warning Pod async-upgrade-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:06 +0000 UTC Normal Pod async-upgrade-orc-1 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-1 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-hq5c default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:07 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:07 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 264ms (264ms including waiting). Image size: 108815039 bytes. 
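Note: the TaintManagerEviction events and the FailedScheduling warnings above (an untolerated node.kubernetes.io/not-ready taint plus anti-affinity leaving no eligible node), together with mysql-1 failing to reach the old primary on port 33062, suggest a worker node went NotReady mid-rollout rather than a defect in the upgrade steps themselves. A reasonable first check when reading a run like this (node name taken from the events above; the commands are standard kubectl):

  kubectl get nodes -o wide
  # Taints on the node the evicted pods had been scheduled to
  kubectl describe node gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp | grep -A3 'Taints:'
  # Scheduling-related events in the test namespace, newest last
  kubectl -n kuttl-test-causal-koala get events --sort-by=.lastTimestamp | grep -E 'FailedScheduling|TaintManagerEviction'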
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:07 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:07 +0000 UTC Normal Pod async-upgrade-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 235ms (235ms including waiting). Image size: 72469805 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 227ms (227ms including waiting). Image size: 72469805 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:09 +0000 UTC Normal Pod async-upgrade-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:42 +0000 UTC Normal Pod async-upgrade-orc-2 TaintManagerEviction Cancelling deletion of Pod kuttl-test-causal-koala/async-upgrade-orc-2 taint-eviction-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:42 +0000 UTC Warning Pod async-upgrade-orc-2 Scheduling FailedScheduling 0/2 nodes are available: 2 node(s) didn't match pod anti-affinity rules. preemption: 0/2 nodes are available: 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:42 +0000 UTC Normal Pod async-upgrade-orc-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:51:43 +0000 UTC Warning Pod async-upgrade-orc-2 Scheduling FailedScheduling 0/2 nodes are available: 2 node(s) didn't match pod anti-affinity rules. preemption: 0/2 nodes are available: 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:08 +0000 UTC Warning Pod async-upgrade-haproxy-0 Scheduling FailedScheduling 0/2 nodes are available: 2 node(s) didn't match pod anti-affinity rules. preemption: 0/2 nodes are available: 2 No preemption victims found for incoming pod. 
default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:08 +0000 UTC Warning Pod async-upgrade-mysql-0 Scheduling FailedScheduling 0/2 nodes are available: 2 node(s) didn't match pod anti-affinity rules. preemption: 0/2 nodes are available: 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:09 +0000 UTC Normal Pod mysql-client TaintManagerEviction Cancelling deletion of Pod kuttl-test-causal-koala/mysql-client taint-eviction-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:13 +0000 UTC Normal Pod async-upgrade-orc-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:14 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:20 +0000 UTC Normal Pod async-upgrade-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:20 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:24 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 1.812s (3.818s including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:24 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:24 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:24 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 8.12s (9.705s including waiting). Image size: 108815039 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:24 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:24 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:30 +0000 UTC Normal Pod async-upgrade-mysql-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-mysql-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:31 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Killing Stopping container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:31 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Killing Stopping container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:33 +0000 UTC Warning Pod async-upgrade-haproxy-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:33 +0000 UTC Normal Pod async-upgrade-haproxy-0 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:33 +0000 UTC Warning Pod async-upgrade-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:33 +0000 UTC Normal Pod async-upgrade-mysql-0 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:33 +0000 UTC Warning Pod async-upgrade-orc-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:34 +0000 UTC Warning Pod async-upgrade-haproxy-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:34 +0000 UTC Warning Pod async-upgrade-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. 
default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:34 +0000 UTC Normal Pod async-upgrade-orc-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:52:34 +0000 UTC Warning Pod async-upgrade-orc-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:53:26 +0000 UTC Warning Pod async-upgrade-mysql-0 FailedAttachVolume AttachVolume.Attach failed for volume "pvc-7dac4228-2732-4029-896c-ce364ec11190" : CSINode gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp does not contain driver pd.csi.storage.gke.io attachdetach-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:12 +0000 UTC Normal Pod async-upgrade-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-haproxy-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:12 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:12 +0000 UTC Warning Pod async-upgrade-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had volume node affinity conflict, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:12 +0000 UTC Normal Pod async-upgrade-orc-2 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-2 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:13 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:20 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 6.003s (7.919s including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:20 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:20 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:20 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 5.352s (7.042s including waiting). Image size: 108815039 bytes. 
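Note: the FailedAttachVolume warning above says the CSINode object for that worker does not list pd.csi.storage.gke.io, i.e. the GCE PD CSI driver had not (re)registered on the node yet, which is consistent with a node that was just recreated. A few hedged checks (the pdcsi-node naming is what GKE usually uses; adjust if the driver is deployed differently):

  # Drivers registered on that specific node
  kubectl get csinode gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp -o yaml
  # Drivers installed cluster-wide
  kubectl get csidrivers
  # Per-node driver pods (on GKE typically named pdcsi-node-*)
  kubectl -n kube-system get pods -o wide | grep pdcsi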
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:20 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:20 +0000 UTC Normal Pod async-upgrade-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:22 +0000 UTC Normal Pod async-upgrade-mysql-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-mysql-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-r2xp default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:29 +0000 UTC Normal Pod async-upgrade-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-7dac4228-2732-4029-896c-ce364ec11190" attachdetach-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:30 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:30 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:30 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:31 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 298ms (298ms including waiting). Image size: 108815039 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:31 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:31 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 4.029s (4.029s including waiting). Image size: 105430309 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 153ms (153ms including waiting). Image size: 105430309 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 4.029s (4.029s including waiting). Image size: 72469864 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 172ms (172ms including waiting). Image size: 72469864 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:34 +0000 UTC Normal Pod async-upgrade-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:58 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 24.48s (24.48s including waiting). Image size: 436538349 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:58 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:58 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:54:58 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:08 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:08 +0000 UTC Normal StatefulSet.apps async-upgrade-orc SuccessfulDelete delete Pod async-upgrade-orc-0 in StatefulSet async-upgrade-orc successful statefulset-controller logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:09 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:27 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 28.36s (28.36s including waiting). Image size: 437244390 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:27 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:27 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:27 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:32 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 4.458s (4.458s including waiting). Image size: 132936537 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:32 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:32 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:40 +0000 UTC Normal Pod async-upgrade-orc-0 Binding Scheduled Successfully assigned kuttl-test-causal-koala/async-upgrade-orc-0 to gke-jen-ps-918-24aa4dce--default-pool-5c33c138-pwl2 default-scheduler logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:40 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:41 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-918-24aa4dce" in 258ms (258ms including waiting). Image size: 108815039 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:41 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:41 +0000 UTC Normal Pod async-upgrade-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:43 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:44 +0000 UTC Warning Pod async-upgrade-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/16 14:55:40 Waiting for MySQL ready state 2025/06/16 14:55:40 MySQL is ready 2025/06/16 14:55:40 Peers: [3837396533613633.async-upgrade-mysql-unready.kuttl-test-causal-koala 6266663263336232.async-upgrade-mysql-unready.kuttl-test-causal-koala 6630663337616631.async-upgrade-mysql-unready.kuttl-test-causal-koala] 2025/06/16 14:55:40 FQDN: async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:55:40 Primary: async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala Replicas: [async-upgrade-mysql-0.async-upgrade-mysql.kuttl-test-causal-koala async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala] 2025/06/16 14:55:40 lookup async-upgrade-mysql-0 [10.186.17.5] 2025/06/16 14:55:40 PodIP: 10.186.17.5 2025/06/16 14:55:40 lookup async-upgrade-mysql-2.async-upgrade-mysql.kuttl-test-causal-koala [10.186.16.21] 2025/06/16 14:55:40 PrimaryIP: 10.186.16.21 2025/06/16 14:55:41 Donor: async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:55:41 Opening connection to 10.186.17.5 2025/06/16 14:55:41 Clone required: true 2025/06/16 14:55:41 Checking if a clone in progress 2025/06/16 14:55:41 Clone in progress: false 2025/06/16 14:55:41 Cloning from async-upgrade-mysql-1.async-upgrade-mysql.kuttl-test-causal-koala 2025/06/16 14:55:44 Clone finished. Restarting container... kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:44 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 2.281s (2.281s including waiting). Image size: 72469864 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 188ms (188ms including waiting). Image size: 72469864 bytes. 
kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:45 +0000 UTC Normal Pod async-upgrade-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:48 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 98ms (98ms including waiting). Image size: 436538349 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:48 +0000 UTC Normal Pod async-upgrade-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 109ms (109ms including waiting). Image size: 132936537 bytes. kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:55:55 +0000 UTC Warning Pod async-upgrade-orc-0.spec.containers{orc} Unhealthy Liveness probe failed: Get "http://10.186.18.22:3000/api/lb-check": dial tcp 10.186.18.22:3000: connect: connection refused kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:56:21 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:56:21 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:56:21 +0000 UTC Normal Pod async-upgrade-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 14:56:29 | async-upgrade | 2025-06-16 14:56:25 +0000 UTC Warning Pod async-upgrade-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/16 14:56:25 MySQL state is not ready... kubelet logger.go:42: 14:56:29 | async-upgrade | Deleting namespace: kuttl-test-causal-koala === NAME kuttl harness.go:403: run tests finished harness.go:510: cleaning up harness.go:567: removing temp folder: "" --- FAIL: kuttl (731.96s) --- FAIL: kuttl/harness (0.00s) --- FAIL: kuttl/harness/async-upgrade (731.31s) FAIL
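Note: the harness tears the namespace down and marks async-upgrade as failed after roughly 732 seconds. When reproducing this locally it usually helps to run only this case and keep the resources for inspection. The sketch below assumes kuttl is pointed straight at the e2e-tests/tests suite referenced at the top of the log; the exact flags depend on the repo's kuttl configuration, which is not shown here.

  # Run only the async-upgrade case; --skip-delete keeps the kuttl-test-* namespace around
  kubectl kuttl test e2e-tests/tests --test async-upgrade --skip-delete --timeout 180
  # Capture events before cleaning the namespace up manually (the namespace name is generated per run)
  kubectl -n <kuttl-test-namespace> get events --sort-by=.lastTimestamp > events.txt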