Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/logs/upgrade-proxysql-5-7.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + CLUSTER=upgrade-proxysql + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.15.0 + TARGET_IMAGE=perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc5.7 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_PROXY=perconalab/percona-xtradb-cluster-operator:main-proxysql + TARGET_IMAGE_HAPROXY=perconalab/percona-xtradb-cluster-operator:main-haproxy + TARGET_IMAGE_BACKUP=perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc5.7 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=5.7 ++ curl -s https://check.percona.com/versions/v1/pxc-operator ++ jq -r '.versions[].operator' ++ sort -V ++ tail -n1 + INIT_OPERATOR_VER=1.14.0 + [[ 1.14.0 == \1\.\1\5\.\0 ]] + GIT_TAG=v1.14.0 ++ curl -s 'https://check.percona.com/versions/v1/pxc-operator/1.14.0/latest?databaseVersion=5.7' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"pxc-operator", "operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, 
"postgis":{}}}]}' + OPERATOR_NAME=percona-xtradb-cluster-operator ++ jq -r '.versions[].matrix.operator[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE=percona/percona-xtradb-cluster-operator:1.14.0 ++ cut -d/ -f1 ++ echo perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-xtradb-cluster-operator:1.14.0 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, 
"backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.pxc[].imagePath' + IMAGE_PXC=percona/percona-xtradb-cluster:5.7.44-31.65 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM_CLIENT=percona/pmm-client:2.41.2 ++ echo '{"versions":[{"product":"pxc-operator", 
"operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.proxysql[].imagePath' + IMAGE_PROXY=percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, 
"operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.haproxy[].imagePath' + IMAGE_HAPROXY=percona/percona-xtradb-cluster-operator:1.14.0-haproxy ++ jq -r '.versions[].matrix.backup[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.14.0", "matrix":{"mongod":{}, "pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65", "imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.5.5-1.2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2", "imageHash":"fcd7c1366f26f3dbaeff65d7ee08c43bbb88838cec9a0085447c1b56c717870d", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.5":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-haproxy", "imageHash":"15b9dad6d59c7995456b92fb1b5c17501ecbc8bafb758ff6e7417d409f06bbbd", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"2.4.29":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29", "imageHash":"e4871437d1a6952f67c43bd10a236dd36c72519220971a8ce644e9320a2a642e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0", "imageHash":"e1b5b1d9db19f394f47b32f374a017d9debcf1e06325ab7bcc4b233da59f6f19", "imageHashArm64":"2652b81a26be7b07e084e1ccbfe42e64143eb6a32e4529f1e4b27e1061dbc0b1", "status":"recommended", "critical":false}}, "logCollector":{"1.14.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.14.0-logcollector", "imageHash":"f8f56b8da5b1d9859dded3f89b7ce41c5b3ceba6d78f7d4152bd0b14bafc60f4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_BACKUP=percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29 + [[ 1.15.0 == \1\.\1\4\.\0 ]] + main + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager 
----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.30dOyGHPkY ++ mktemp + local LAST_ERR=/tmp/tmp.wtcLTKSg0q + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.30dOyGHPkY namespace/cert-manager created + cat /tmp/tmp.wtcLTKSg0q + rm /tmp/tmp.30dOyGHPkY /tmp/tmp.wtcLTKSg0q + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.HMuSw1X5RD ++ mktemp + local LAST_ERR=/tmp/tmp.VH379wKrk7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.HMuSw1X5RD namespace/cert-manager labeled + cat /tmp/tmp.VH379wKrk7 + rm /tmp/tmp.HMuSw1X5RD /tmp/tmp.VH379wKrk7 + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.plzEyDa6mJ ++ mktemp + local LAST_ERR=/tmp/tmp.i8kscrYrUy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.plzEyDa6mJ namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged 
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.i8kscrYrUy Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
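-----------------------------------------------------------------------------------
note: how the 1.14.0 "from" version and images above were resolved
-----------------------------------------------------------------------------------
The INIT_OPERATOR_VER and IMAGE_* assignments at the top of this log come from the Percona version service. A minimal sketch of that resolution logic, using the endpoint and jq filters exactly as they appear in the trace (variable names match the test script; error handling omitted):

# latest released operator version, e.g. 1.14.0
INIT_OPERATOR_VER=$(curl -s https://check.percona.com/versions/v1/pxc-operator \
    | jq -r '.versions[].operator' | sort -V | tail -n1)
GIT_TAG="v${INIT_OPERATOR_VER}"
# full image matrix for that version and PXC 5.7
INIT_OPERATOR_IMAGES=$(curl -s "https://check.percona.com/versions/v1/pxc-operator/${INIT_OPERATOR_VER}/latest?databaseVersion=5.7")
IMAGE=$(echo "$INIT_OPERATOR_IMAGES" | jq -r '.versions[].matrix.operator[].imagePath')
IMAGE_PXC=$(echo "$INIT_OPERATOR_IMAGES" | jq -r '.versions[].matrix.pxc[].imagePath')
IMAGE_PROXY=$(echo "$INIT_OPERATOR_IMAGES" | jq -r '.versions[].matrix.proxysql[].imagePath')

The test then upgrades from these 1.14.0 images to the TARGET_* images built for PR-1740.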
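-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper
-----------------------------------------------------------------------------------
Nearly all of the mktemp/LAST_OUT/LAST_ERR noise in this log comes from one helper that retries kubectl. A rough reconstruction inferred from the trace, not the canonical e2e-tests source (the back-off between attempts is an assumption; the trace only shows "sleep 0"):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                       # up to 3 attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ $exit_status != 0 ] || break            # success: stop retrying
        sleep $((i * 3))                          # assumption: grows per attempt
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

This is why transient failures (for example the namespace deletions later in this log) are retried twice before the caller sees a non-zero return.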
+ rm /tmp/tmp.plzEyDa6mJ /tmp/tmp.i8kscrYrUy + return 0 + '[' '' == 4.10 ']' + sleep 70 + create_infra_gh upgrade-proxysql-23780 v1.14.0 + local ns=upgrade-proxysql-23780 + local git_tag=v1.14.0 + '[' -n pxc-operator ']' + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.Jc1I8REzUP ++ mktemp + local LAST_OUT=/tmp/tmp.MmsO4t6UwU + local LAST_ERR=/tmp/tmp.tWTqzl2NBZ ++ mktemp + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.hyqNKNg78A + local exit_status=0 + for i in '$(seq 0 2)' ++ seq 0 2 + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Jc1I8REzUP + cat /tmp/tmp.tWTqzl2NBZ + rm /tmp/tmp.Jc1I8REzUP /tmp/tmp.tWTqzl2NBZ + return 0 namespace "cert-manager" deleted namespace "upgrade-proxysql-15249" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MmsO4t6UwU namespace "pxc-operator" deleted + cat /tmp/tmp.hyqNKNg78A + rm /tmp/tmp.MmsO4t6UwU /tmp/tmp.hyqNKNg78A + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not 
found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fT9qVgagI8 ++ mktemp + local LAST_ERR=/tmp/tmp.NvebzoDtAs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fT9qVgagI8 namespace/pxc-operator created + cat /tmp/tmp.NvebzoDtAs + rm /tmp/tmp.fT9qVgagI8 /tmp/tmp.NvebzoDtAs + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.3pbTrBQQzR +++ mktemp ++ local LAST_ERR=/tmp/tmp.V6KzD79XOF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3pbTrBQQzR ++ cat /tmp/tmp.V6KzD79XOF ++ rm /tmp/tmp.3pbTrBQQzR /tmp/tmp.V6KzD79XOF ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3n3jtpSM1I ++ mktemp + local LAST_ERR=/tmp/tmp.F52FK0rWzE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3n3jtpSM1I Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3" modified. + cat /tmp/tmp.F52FK0rWzE + rm /tmp/tmp.3n3jtpSM1I /tmp/tmp.F52FK0rWzE + return 0 + deploy_operator_gh v1.14.0 + local git_tag=v1.14.0 + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- ++ kubectl_bin get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B2eHdyBjEW +++ mktemp ++ local LAST_ERR=/tmp/tmp.l6BMRweG3r ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.B2eHdyBjEW ++ cat /tmp/tmp.l6BMRweG3r ++ rm /tmp/tmp.B2eHdyBjEW /tmp/tmp.l6BMRweG3r ++ return 0 + [[ -n perconaxtradbclusters.pxc.percona.com ]] ++ kubectl_bin get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-14-0")].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sSnnbVPdvA +++ mktemp ++ local LAST_ERR=/tmp/tmp.kmvEnWsoNz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-14-0")].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sSnnbVPdvA ++ cat /tmp/tmp.kmvEnWsoNz ++ rm /tmp/tmp.sSnnbVPdvA /tmp/tmp.kmvEnWsoNz ++ return 0 + [[ -n '' ]] + kubectl_bin apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.JxBY8KfLX2 ++ mktemp + local LAST_ERR=/tmp/tmp.6uudEcGjOf + local exit_status=0 ++ seq 0 2 + for i in 
'$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JxBY8KfLX2 + cat /tmp/tmp.6uudEcGjOf + rm /tmp/tmp.JxBY8KfLX2 /tmp/tmp.6uudEcGjOf + return 0 + local rbac_yaml=rbac + local operator_yaml=operator.yaml + '[' -n pxc-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator.yaml + apply_rbac_gh cw-rbac v1.14.0 + local operator_namespace=pxc-operator + local rbac=cw-rbac + local git_tag=v1.14.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/cw-rbac.yaml + /usr/bin/sed -i -e 's^namespace: .*^namespace: pxc-operator^' /tmp/tmp.L6uKB17ktu/rbac_v1.14.0.yaml + kubectl_bin apply -f /tmp/tmp.L6uKB17ktu/rbac_v1.14.0.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.kNXtp5ki63 ++ mktemp + local LAST_ERR=/tmp/tmp.kwQqQJAN3T + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.L6uKB17ktu/rbac_v1.14.0.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kNXtp5ki63 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator configured serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.kwQqQJAN3T + rm /tmp/tmp.kNXtp5ki63 /tmp/tmp.kwQqQJAN3T + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/cw-operator.yaml + cat /tmp/tmp.L6uKB17ktu/cw-operator.yaml_v1.14.0.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:1.14.0^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n pxc-operator -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' ++ mktemp + local LAST_OUT=/tmp/tmp.XNShR0SN35 ++ mktemp + local LAST_ERR=/tmp/tmp.gif6VT7Oar + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XNShR0SN35 deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.gif6VT7Oar + rm /tmp/tmp.XNShR0SN35 /tmp/tmp.gif6VT7Oar + return 0 + sleep 2 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.e4DYiXcMaV +++ mktemp ++ local LAST_ERR=/tmp/tmp.F14B8G6XeI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.e4DYiXcMaV ++ cat /tmp/tmp.F14B8G6XeI ++ rm /tmp/tmp.e4DYiXcMaV /tmp/tmp.F14B8G6XeI ++ return 0 + wait_pod percona-xtradb-cluster-operator-7b86f9967b-m9xl7 + local 
pod=percona-xtradb-cluster-operator-7b86f9967b-m9xl7 + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-7b86f9967b-m9xl7 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-7b86f9967b-m9xl7 condition met percona-xtradb-cluster-operator-7b86f9967b-m9xl7.Ok + create_namespace upgrade-proxysql-23780 + local namespace=upgrade-proxysql-23780 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-proxysql-23780' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-proxysql-23780 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-proxysql-23780 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.R5leaIuQQa + local LAST_OUT=/tmp/tmp.r221BCg6Cj ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.2i802mQXzP + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.0vp42U9R7q + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-proxysql-23780 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-proxysql-23780 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.r221BCg6Cj + cat /tmp/tmp.2i802mQXzP + rm /tmp/tmp.r221BCg6Cj /tmp/tmp.2i802mQXzP + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-proxysql-23780 Error from server (Forbidden): namespaces "default" is 
forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.R5leaIuQQa + cat /tmp/tmp.0vp42U9R7q Error from server (NotFound): namespaces "upgrade-proxysql-23780" not found + rm /tmp/tmp.R5leaIuQQa /tmp/tmp.0vp42U9R7q + return 1 + : + wait_for_delete namespace/upgrade-proxysql-23780 + local res=namespace/upgrade-proxysql-23780 + echo -n 'namespace/upgrade-proxysql-23780 - ' namespace/upgrade-proxysql-23780 - + set +o xtrace Error from server (NotFound): namespaces "upgrade-proxysql-23780" not found + desc 'create namespace upgrade-proxysql-23780' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-proxysql-23780 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-proxysql-23780 ++ mktemp + local LAST_OUT=/tmp/tmp.GtNujGrR2N ++ mktemp + local LAST_ERR=/tmp/tmp.zqyiFaYnik + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-proxysql-23780 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GtNujGrR2N namespace/upgrade-proxysql-23780 created + cat /tmp/tmp.zqyiFaYnik + rm /tmp/tmp.GtNujGrR2N /tmp/tmp.zqyiFaYnik + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.SCuXUsz9te +++ mktemp ++ local LAST_ERR=/tmp/tmp.0nvZYnEmLu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SCuXUsz9te ++ cat /tmp/tmp.0nvZYnEmLu ++ rm /tmp/tmp.SCuXUsz9te /tmp/tmp.0nvZYnEmLu ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=upgrade-proxysql-23780 ++ mktemp + local LAST_OUT=/tmp/tmp.yi9sLVbwg7 ++ mktemp + local LAST_ERR=/tmp/tmp.z0JmtUWDlT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=upgrade-proxysql-23780 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yi9sLVbwg7 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3" modified. 
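-----------------------------------------------------------------------------------
note: the chaos-mesh cleanup errors above are expected
-----------------------------------------------------------------------------------
Each "error: resource(s) were provided, but no name was specified" comes from destroy_chaos_mesh: it feeds the names matched by a grep into kubectl delete, and on a cluster with no chaos-mesh installed the grep matches nothing, so kubectl delete is invoked with a resource kind but no names and fails. The failure is swallowed with "|| :". A sketch of the pattern (kind list abridged; the trace also covers crd via grep chaos-mesh.org):

for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration clusterrolebinding clusterrole; do
    timeout 30 kubectl delete "$kind" \
        $(kubectl get "$kind" | grep chaos-mesh | awk '{print $1}') || :
done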
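-----------------------------------------------------------------------------------
note: how the 1.14.0 operator deployment is patched before apply
-----------------------------------------------------------------------------------
Earlier, deploy_operator_gh pulled the v1.14.0 release manifest and rewrote it before applying. Condensed from the trace into one pipeline (the real script stages the curl output in a temp file first; this sketch pipes it directly):

curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/cw-operator.yaml |
    sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:1.14.0^' |
    yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' |
    yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' |
    kubectl apply -n pxc-operator -f -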
+ cat /tmp/tmp.z0JmtUWDlT + rm /tmp/tmp.yi9sLVbwg7 /tmp/tmp.z0JmtUWDlT + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.iXgk0E2Doi ++ mktemp + local LAST_ERR=/tmp/tmp.sdLzPziNYT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iXgk0E2Doi secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.sdLzPziNYT + rm /tmp/tmp.iXgk0E2Doi /tmp/tmp.sdLzPziNYT + return 0 + local proxy=proxysql + local cr_yaml=/tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + prepare_cr_yaml /tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml proxysql upgrade-proxysql 3 v1.14.0 + local cr_yaml=/tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + local proxy=proxysql + local cluster=upgrade-proxysql + local cluster_size=3 + local git_tag=v1.14.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.14.0/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-proxysql" | .spec.secretsName = "my-cluster-secrets" | .spec.vaultSecretName = "some-name-vault" | .spec.sslSecretName = "some-name-ssl" | .spec.sslInternalSecretName = "some-name-ssl-internal" | .spec.upgradeOptions.apply = "disabled" | .spec.pxc.size = 3 | .spec.proxysql.size = 3 | .spec.haproxy.size = 3 | .spec.pxc.image = "-pxc" | .spec.proxysql.image = "-proxysql" | .spec.haproxy.image = "-haproxy" | .spec.backup.image = "-backup" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service.#namespace:9000/" | .spec.backup.storages.minio.type = "s3" | .spec.pmm.image = "-pmm" ' - + [[ proxysql == \h\a\p\r\o\x\y ]] + yq -i eval ' .spec.haproxy.enabled = false | .spec.proxysql.enabled = true ' /tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + spinup_pxc upgrade-proxysql /tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml 3 30 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/secrets_without_tls.yml + local cluster=upgrade-proxysql + local config=/tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + local size=3 + local sleep=30 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.VrYVXjgj2q ++ mktemp + local 
LAST_ERR=/tmp/tmp.f5YywuxOQQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VrYVXjgj2q secret/my-cluster-secrets created + cat /tmp/tmp.f5YywuxOQQ + rm /tmp/tmp.VrYVXjgj2q /tmp/tmp.f5YywuxOQQ + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/percona-xtradb-cluster-operator:1.14.0-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-proxysql-23780~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:2.41.2#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' + local LAST_OUT=/tmp/tmp.5XiBudUXD0 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' ++ mktemp + local LAST_ERR=/tmp/tmp.O0NdRqxzfS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.14.0#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5XiBudUXD0 deployment.apps/pxc-client created + cat /tmp/tmp.O0NdRqxzfS + rm /tmp/tmp.5XiBudUXD0 /tmp/tmp.O0NdRqxzfS + return 0 + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ upgrade-proxysql == \d\e\m\a\n\d\-\b\a\c\k\u\p ]] + [[ upgrade-proxysql == \d\e\m\a\n\d\-\b\a\c\k\u\p\-\c\l\o\u\d ]] + apply_config /tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + '[' -z '' ']' + cat_config /tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + kubectl_bin apply -f - + cat /tmp/tmp.L6uKB17ktu/cr_1.14.0_proxysql.yaml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.14.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:2.41.2#' + local LAST_OUT=/tmp/tmp.O3sUE4c5lL + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-proxysql-23780~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2#' + /usr/bin/sed -e 
's#image:.*-haproxy$#image: percona/percona-xtradb-cluster-operator:1.14.0-haproxy#' ++ mktemp + local LAST_ERR=/tmp/tmp.i9PsGD73Jl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.O3sUE4c5lL perconaxtradbcluster.pxc.percona.com/upgrade-proxysql created + cat /tmp/tmp.i9PsGD73Jl + rm /tmp/tmp.O3sUE4c5lL /tmp/tmp.i9PsGD73Jl + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy upgrade-proxysql ++ local target_cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fShljHG1P6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QoScFi5jac +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.fShljHG1P6 +++ cat /tmp/tmp.QoScFi5jac +++ rm /tmp/tmp.fShljHG1P6 /tmp/tmp.QoScFi5jac +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wG9LXbzw9s ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jElZy9cUeO +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.wG9LXbzw9s +++ cat /tmp/tmp.jElZy9cUeO +++ rm /tmp/tmp.wG9LXbzw9s /tmp/tmp.jElZy9cUeO +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo upgrade-proxysql-proxysql ++ return + local proxy=upgrade-proxysql-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-23780 ++ mktemp + local LAST_OUT=/tmp/tmp.YuLQWVV34c ++ mktemp + local LAST_ERR=/tmp/tmp.Xway5cwsWp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-23780 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-23780 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-23780 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.YuLQWVV34c + cat /tmp/tmp.Xway5cwsWp error: no matching resources found + rm /tmp/tmp.YuLQWVV34c /tmp/tmp.Xway5cwsWp + return 1 + true + wait_for_running upgrade-proxysql-proxysql 1 + local name=upgrade-proxysql-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster 
----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-proxysql-0 480 + local pod=upgrade-proxysql-proxysql-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace Error from server (NotFound): pods "upgrade-proxysql-proxysql-0" not found upgrade-proxysql-proxysql-0...........Ok + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo upgrade-proxysql-pxc-0 + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-0 condition met upgrade-proxysql-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-1 condition met upgrade-proxysql-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-2 condition met upgrade-proxysql-pxc-2.Ok + sleep 30 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] ++ is_keyring_plugin_in_use upgrade-proxysql ++ local cluster=upgrade-proxysql ++ kubectl_bin exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6EJ0p9POMh +++ mktemp ++ local LAST_ERR=/tmp/tmp.fRW37CbZPU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6EJ0p9POMh ++ cat /tmp/tmp.fRW37CbZPU Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.6EJ0p9POMh /tmp/tmp.fRW37CbZPU ++ return 0 + [[ -n '' ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h upgrade-proxysql-proxysql -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h upgrade-proxysql-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zmt9jDMeDk +++ mktemp ++ local LAST_ERR=/tmp/tmp.IL52DvTgEf ++ local 
exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Zmt9jDMeDk ++ cat /tmp/tmp.IL52DvTgEf ++ rm /tmp/tmp.Zmt9jDMeDk /tmp/tmp.IL52DvTgEf ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h upgrade-proxysql-proxysql -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h upgrade-proxysql-proxysql -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rYpO48Q7I1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BvwJWcr7Us ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rYpO48Q7I1 ++ cat /tmp/tmp.BvwJWcr7Us ++ rm /tmp/tmp.rYpO48Q7I1 /tmp/tmp.BvwJWcr7Us ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GKYj6tsrQO +++ mktemp ++ local LAST_ERR=/tmp/tmp.ccbMBrD4nF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.GKYj6tsrQO ++ cat /tmp/tmp.ccbMBrD4nF ++ rm /tmp/tmp.GKYj6tsrQO /tmp/tmp.ccbMBrD4nF ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d 
condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f5FzRZozvw +++ mktemp ++ local LAST_ERR=/tmp/tmp.hyHoQ0QlcV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.f5FzRZozvw ++ cat /tmp/tmp.hyHoQ0QlcV ++ rm /tmp/tmp.f5FzRZozvw /tmp/tmp.hyHoQ0QlcV ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4GkSfKgA7b +++ mktemp ++ local LAST_ERR=/tmp/tmp.6im836rQ30 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4GkSfKgA7b ++ cat /tmp/tmp.6im836rQ30 ++ rm /tmp/tmp.4GkSfKgA7b /tmp/tmp.6im836rQ30 ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql ++ is_keyring_plugin_in_use upgrade-proxysql ++ local cluster=upgrade-proxysql ++ kubectl_bin exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x1w19pRQzl +++ mktemp ++ local LAST_ERR=/tmp/tmp.y9KvNtUvEn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.x1w19pRQzl ++ cat /tmp/tmp.y9KvNtUvEn Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.x1w19pRQzl /tmp/tmp.y9KvNtUvEn ++ return 0 + '[' '' ']' + compare_generation 1 proxysql upgrade-proxysql + local generation=1 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.THk7jZe0Se +++ mktemp ++ local LAST_ERR=/tmp/tmp.HhzfBqREWM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.THk7jZe0Se ++ cat /tmp/tmp.HhzfBqREWM ++ rm /tmp/tmp.THk7jZe0Se /tmp/tmp.HhzfBqREWM ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.81QLfw0Ymd +++ mktemp ++ local LAST_ERR=/tmp/tmp.vEetxc7B3T ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.81QLfw0Ymd ++ cat /tmp/tmp.vEetxc7B3T ++ rm /tmp/tmp.81QLfw0Ymd /tmp/tmp.vEetxc7B3T ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.i6hURyw0e4 ++ mktemp + local LAST_ERR=/tmp/tmp.bflRLser3j + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.i6hURyw0e4 customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.bflRLser3j + rm /tmp/tmp.i6hURyw0e4 /tmp/tmp.bflRLser3j + return 0 + [[ -n pxc-operator ]] + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/cw-rbac.yaml + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.XwffnCldTN ++ mktemp + local LAST_ERR=/tmp/tmp.e5nChiwQ41 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XwffnCldTN clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator configured serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.e5nChiwQ41 + rm /tmp/tmp.XwffnCldTN /tmp/tmp.e5nChiwQ41 + return 0 + kubectl_bin patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68"}]}}}}' -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qVlEpkInU3 ++ mktemp + local LAST_ERR=/tmp/tmp.aq51yjDNvj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68"}]}}}}' -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qVlEpkInU3 deployment.apps/percona-xtradb-cluster-operator patched + cat /tmp/tmp.aq51yjDNvj + rm /tmp/tmp.qVlEpkInU3 /tmp/tmp.aq51yjDNvj + return 0 + kubectl_bin rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.GTbhWah2u5 ++ mktemp + local LAST_ERR=/tmp/tmp.wpfIPxv9vC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GTbhWah2u5 Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 of 1 updated replicas are available... 
deployment "percona-xtradb-cluster-operator" successfully rolled out + cat /tmp/tmp.wpfIPxv9vC + rm /tmp/tmp.GTbhWah2u5 /tmp/tmp.wpfIPxv9vC + return 0 + sleep 10 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- + local i=0 + local max=60 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B6jDJqnYCl +++ mktemp ++ local LAST_ERR=/tmp/tmp.HNAjJz8HLA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.B6jDJqnYCl ++ cat /tmp/tmp.HNAjJz8HLA ++ rm /tmp/tmp.B6jDJqnYCl /tmp/tmp.HNAjJz8HLA ++ return 0 + [[ 1 -eq 1 ]] + '[' -n pxc-operator ']' ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.bcdSzzYZSh +++ mktemp ++ local LAST_ERR=/tmp/tmp.6iZ6wK0GNv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bcdSzzYZSh ++ cat /tmp/tmp.6iZ6wK0GNv ++ rm /tmp/tmp.bcdSzzYZSh /tmp/tmp.6iZ6wK0GNv ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.hObvnEpiEG ++ mktemp + local LAST_ERR=/tmp/tmp.epueehM9Xh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hObvnEpiEG Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3" modified. 
+ cat /tmp/tmp.epueehM9Xh + rm /tmp/tmp.hObvnEpiEG /tmp/tmp.epueehM9Xh + return 0 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SfajUhfZZx +++ mktemp ++ local LAST_ERR=/tmp/tmp.sldpySfhU3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SfajUhfZZx ++ cat /tmp/tmp.sldpySfhU3 ++ rm /tmp/tmp.SfajUhfZZx /tmp/tmp.sldpySfhU3 ++ return 0 + wait_pod percona-xtradb-cluster-operator-6dcb8b6685-qhfb4 + local pod=percona-xtradb-cluster-operator-6dcb8b6685-qhfb4 + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-6dcb8b6685-qhfb4 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-6dcb8b6685-qhfb4 condition met percona-xtradb-cluster-operator-6dcb8b6685-qhfb4.Ok ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.rd0Z1SBwKf +++ mktemp ++ local LAST_ERR=/tmp/tmp.WuAzorMjGt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rd0Z1SBwKf ++ cat /tmp/tmp.WuAzorMjGt ++ rm /tmp/tmp.rd0Z1SBwKf /tmp/tmp.WuAzorMjGt ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=upgrade-proxysql-23780 ++ mktemp + local LAST_OUT=/tmp/tmp.IIHqulI4rl ++ mktemp + local LAST_ERR=/tmp/tmp.LdIJY872Et + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3 --namespace=upgrade-proxysql-23780 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IIHqulI4rl Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster3" modified. 
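
Almost every step above runs through kubectl_bin, whose expansion is what fills this trace with the mktemp, LAST_OUT, and LAST_ERR noise. Reassembling the traced steps gives roughly the following sketch (no retry ever fires in this run, so whatever happens between failed attempts, e.g. a sleep, is not visible here and is left out):

kubectl_bin() {
    local LAST_OUT LAST_ERR
    local exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                    # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ $exit_status != 0 ]; then         # never taken in this log
            continue
        fi
        break
    done
    cat "$LAST_OUT"                            # replay captured stdout
    cat "$LAST_ERR"                            # replay captured stderr (TTY warnings etc.)
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

Capturing to temp files lets command substitutions such as client_pod=$(kubectl_bin get pods ...) receive clean output while the log still records both streams.
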
+ cat /tmp/tmp.LdIJY872Et + rm /tmp/tmp.IIHqulI4rl /tmp/tmp.LdIJY872Et + return 0 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-proxysql 3 + local cluster=upgrade-proxysql + local cluster_size=3 + wait_cluster_consistency upgrade-proxysql 3 + local cluster_name=upgrade-proxysql + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-proxysql ++ local cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b03TjfDgGZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tYbNrtyq14 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.b03TjfDgGZ +++ cat /tmp/tmp.tYbNrtyq14 +++ rm /tmp/tmp.b03TjfDgGZ /tmp/tmp.tYbNrtyq14 +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HHyDRLMxoF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1z4nipLuRZ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.HHyDRLMxoF +++ cat /tmp/tmp.1z4nipLuRZ +++ rm /tmp/tmp.HHyDRLMxoF /tmp/tmp.1z4nipLuRZ +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.alhQqExObR +++ mktemp ++ local LAST_ERR=/tmp/tmp.4NHXBjKAXX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.alhQqExObR ++ cat /tmp/tmp.4NHXBjKAXX ++ rm /tmp/tmp.alhQqExObR /tmp/tmp.4NHXBjKAXX ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O6vtWRQzBK +++ mktemp ++ local LAST_ERR=/tmp/tmp.YEdbASWQLU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.O6vtWRQzBK ++ cat /tmp/tmp.YEdbASWQLU ++ rm /tmp/tmp.O6vtWRQzBK /tmp/tmp.YEdbASWQLU ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fmYgULBHfc +++ mktemp ++ local LAST_ERR=/tmp/tmp.UtRxIvdJM0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fmYgULBHfc ++ cat /tmp/tmp.UtRxIvdJM0 ++ rm /tmp/tmp.fmYgULBHfc /tmp/tmp.UtRxIvdJM0 ++ return 0 + [[ 3 == \3 ]] +++ 
get_proxy_engine upgrade-proxysql +++ local cluster_name=upgrade-proxysql ++++ get_proxy upgrade-proxysql ++++ local target_cluster=upgrade-proxysql +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.tT7whPGR8I ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.CZoJw7iMSn +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.tT7whPGR8I +++++ cat /tmp/tmp.CZoJw7iMSn +++++ rm /tmp/tmp.tT7whPGR8I /tmp/tmp.CZoJw7iMSn +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.KPhWHQeskV ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.lLI1pa4UoZ +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.KPhWHQeskV +++++ cat /tmp/tmp.lLI1pa4UoZ +++++ rm /tmp/tmp.KPhWHQeskV /tmp/tmp.lLI1pa4UoZ +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-proxysql-proxysql ++++ return +++ local cluster_proxy=upgrade-proxysql-proxysql +++ echo proxysql ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jzAC5eEBRX +++ mktemp ++ local LAST_ERR=/tmp/tmp.494texJWt0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jzAC5eEBRX ++ cat /tmp/tmp.494texJWt0 ++ rm /tmp/tmp.jzAC5eEBRX /tmp/tmp.494texJWt0 ++ return 0 + [[ 3 == \3 ]] + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-0 condition met upgrade-proxysql-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-1 condition met upgrade-proxysql-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-2 condition met upgrade-proxysql-pxc-2.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 
command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MvtTlZQq1p +++ mktemp ++ local LAST_ERR=/tmp/tmp.vEwxIV3v8c ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MvtTlZQq1p ++ cat /tmp/tmp.vEwxIV3v8c ++ rm /tmp/tmp.MvtTlZQq1p /tmp/tmp.vEwxIV3v8c ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rBTdVveAPc +++ mktemp ++ local LAST_ERR=/tmp/tmp.CPps9ncLot ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rBTdVveAPc ++ cat /tmp/tmp.CPps9ncLot ++ rm /tmp/tmp.rBTdVveAPc /tmp/tmp.CPps9ncLot ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g5d1ujCkPw +++ mktemp ++ local LAST_ERR=/tmp/tmp.u1wU9e28gT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.g5d1ujCkPw ++ cat /tmp/tmp.u1wU9e28gT ++ rm /tmp/tmp.g5d1ujCkPw /tmp/tmp.u1wU9e28gT ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8pZqYTJNAF +++ mktemp ++ local LAST_ERR=/tmp/tmp.TaNIR1AptJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8pZqYTJNAF ++ cat /tmp/tmp.TaNIR1AptJ ++ rm /tmp/tmp.8pZqYTJNAF /tmp/tmp.TaNIR1AptJ ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 == perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qk8cIoPMcP +++ mktemp ++ local LAST_ERR=/tmp/tmp.rJo7GFMq3z ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qk8cIoPMcP ++ cat /tmp/tmp.rJo7GFMq3z ++ rm /tmp/tmp.qk8cIoPMcP /tmp/tmp.rJo7GFMq3z ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2 == percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f9HJ8kxgKZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.cyHjIugEwK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.f9HJ8kxgKZ ++ cat /tmp/tmp.cyHjIugEwK ++ rm /tmp/tmp.f9HJ8kxgKZ /tmp/tmp.cyHjIugEwK ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.14.0-haproxy == percona/percona-xtradb-cluster-operator:1.14.0-haproxy ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zT5cSs7RSs +++ mktemp ++ local LAST_ERR=/tmp/tmp.cJb3W9bcTG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zT5cSs7RSs ++ cat /tmp/tmp.cJb3W9bcTG ++ rm /tmp/tmp.zT5cSs7RSs /tmp/tmp.cJb3W9bcTG ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29 == percona/percona-xtradb-cluster-operator:1.14.0-pxc5.7-backup-pxb2.4.29 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DirEvnKKiL +++ mktemp ++ local LAST_ERR=/tmp/tmp.zGWTcUjDbG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DirEvnKKiL ++ cat /tmp/tmp.zGWTcUjDbG ++ rm /tmp/tmp.DirEvnKKiL /tmp/tmp.zGWTcUjDbG ++ return 0 + [[ percona/pmm-client:2.41.2 == percona/pmm-client:2.41.2 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.sB9jYpfSzr +++ mktemp ++ local LAST_ERR=/tmp/tmp.SO3EU2jlIG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sB9jYpfSzr ++ cat /tmp/tmp.SO3EU2jlIG ++ rm /tmp/tmp.sB9jYpfSzr /tmp/tmp.SO3EU2jlIG ++ return 0 + [[ percona/percona-xtradb-cluster:5.7.44-31.65 == percona/percona-xtradb-cluster:5.7.44-31.65 ]] + : Operator image has been updated correctly + compare_generation 1 proxysql upgrade-proxysql + local generation=1 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9kjdpNk8G3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uJLXx01i9c ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9kjdpNk8G3 ++ cat /tmp/tmp.uJLXx01i9c ++ rm /tmp/tmp.9kjdpNk8G3 /tmp/tmp.uJLXx01i9c ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hXHjTAznGs +++ mktemp ++ local LAST_ERR=/tmp/tmp.vo4LAJboft ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hXHjTAznGs ++ cat /tmp/tmp.vo4LAJboft ++ rm /tmp/tmp.hXHjTAznGs /tmp/tmp.vo4LAJboft ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch pxc images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch pxc images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch pxc upgrade-proxysql --type=merge --patch '{ "spec": { "crVersion": "1.15.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.r9zvXSH4vQ ++ mktemp + local LAST_ERR=/tmp/tmp.2JijdaZuuz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc upgrade-proxysql --type=merge --patch '{ "spec": { "crVersion": "1.15.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.r9zvXSH4vQ perconaxtradbcluster.pxc.percona.com/upgrade-proxysql patched + cat /tmp/tmp.2JijdaZuuz + rm /tmp/tmp.r9zvXSH4vQ /tmp/tmp.2JijdaZuuz + return 0 + sleep 10 
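
The consistency check that follows polls the pxc custom resource until its status settles. From the traced expansion, wait_cluster_consistency is approximately the sketch below (get_proxy_size and get_proxy_engine are the helpers expanded above; the behavior on hitting max is assumed, since this run always reaches ready first):

wait_cluster_consistency() {
    local cluster_name=$1
    local cluster_size=$2
    local proxy_size=$3
    [ -z "$proxy_size" ] && proxy_size=$(get_proxy_size "$cluster_name")
    local i=0
    local max=36
    sleep 7    # give the operator a moment to react to the change
    until [[ $(kubectl_bin get pxc "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
        echo 'waiting for cluster readyness'    # sic, as the message appears in this log
        sleep 20
        if [[ $i -ge $max ]]; then
            return 1                            # assumed give-up after 36 polls (~12 minutes)
        fi
        let i+=1
    done
    # once the CR reports ready, both statefulsets must be at full size
    [[ $(kubectl_bin get pxc "$cluster_name" -o 'jsonpath={.status.pxc.ready}') == "$cluster_size" ]]
    [[ $(kubectl_bin get pxc "$cluster_name" -o "jsonpath={.status.$(get_proxy_engine "$cluster_name").ready}") == "$proxy_size" ]]
}
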
+ desc 'check images and generation after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after full upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-proxysql 3 + local cluster=upgrade-proxysql + local cluster_size=3 + wait_cluster_consistency upgrade-proxysql 3 + local cluster_name=upgrade-proxysql + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-proxysql ++ local cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nKANSB4NSZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uwq4Irl1b8 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.nKANSB4NSZ +++ cat /tmp/tmp.uwq4Irl1b8 +++ rm /tmp/tmp.nKANSB4NSZ /tmp/tmp.uwq4Irl1b8 +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JLNgE1ib1z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XMiBqRXYE1 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.JLNgE1ib1z +++ cat /tmp/tmp.XMiBqRXYE1 +++ rm /tmp/tmp.JLNgE1ib1z /tmp/tmp.XMiBqRXYE1 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k6RqVIjH2c +++ mktemp ++ local LAST_ERR=/tmp/tmp.oRcUBOn5tz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.k6RqVIjH2c ++ cat /tmp/tmp.oRcUBOn5tz ++ rm /tmp/tmp.k6RqVIjH2c /tmp/tmp.oRcUBOn5tz ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XLusYEn7M5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.W1dSs4Baa0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XLusYEn7M5 ++ cat /tmp/tmp.W1dSs4Baa0 ++ rm /tmp/tmp.XLusYEn7M5 /tmp/tmp.W1dSs4Baa0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AranxkBXMe +++ mktemp ++ local LAST_ERR=/tmp/tmp.bvoNqcWQM7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AranxkBXMe ++ cat /tmp/tmp.bvoNqcWQM7 ++ rm /tmp/tmp.AranxkBXMe /tmp/tmp.bvoNqcWQM7 ++ return 0 + [[ 
initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bL9QMk0S9l +++ mktemp ++ local LAST_ERR=/tmp/tmp.VGrkDsGZvU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bL9QMk0S9l ++ cat /tmp/tmp.VGrkDsGZvU ++ rm /tmp/tmp.bL9QMk0S9l /tmp/tmp.VGrkDsGZvU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rzotzZ8s7R +++ mktemp ++ local LAST_ERR=/tmp/tmp.Dx9jQjdsax ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rzotzZ8s7R ++ cat /tmp/tmp.Dx9jQjdsax ++ rm /tmp/tmp.rzotzZ8s7R /tmp/tmp.Dx9jQjdsax ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lsTZG8oF7x +++ mktemp ++ local LAST_ERR=/tmp/tmp.8HGGYOk6CY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.lsTZG8oF7x ++ cat /tmp/tmp.8HGGYOk6CY ++ rm /tmp/tmp.lsTZG8oF7x /tmp/tmp.8HGGYOk6CY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0McsV7b2gX +++ mktemp ++ local LAST_ERR=/tmp/tmp.KLiqSYI3hO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0McsV7b2gX ++ cat /tmp/tmp.KLiqSYI3hO ++ rm /tmp/tmp.0McsV7b2gX /tmp/tmp.KLiqSYI3hO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p0p1OztOSk +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ElZn3HoMK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.p0p1OztOSk ++ cat /tmp/tmp.0ElZn3HoMK ++ rm /tmp/tmp.p0p1OztOSk /tmp/tmp.0ElZn3HoMK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uIk8XSpWaV +++ mktemp ++ local LAST_ERR=/tmp/tmp.4u8YHkvw9P ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 ']' ++ break ++ cat /tmp/tmp.uIk8XSpWaV ++ cat /tmp/tmp.4u8YHkvw9P ++ rm /tmp/tmp.uIk8XSpWaV /tmp/tmp.4u8YHkvw9P ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 7 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qqh8nCcV8O +++ mktemp ++ local LAST_ERR=/tmp/tmp.e7ttVi7LSp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qqh8nCcV8O ++ cat /tmp/tmp.e7ttVi7LSp ++ rm /tmp/tmp.qqh8nCcV8O /tmp/tmp.e7ttVi7LSp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 8 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WWM1XXlxh3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GoTkx0dT0i ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WWM1XXlxh3 ++ cat /tmp/tmp.GoTkx0dT0i ++ rm /tmp/tmp.WWM1XXlxh3 /tmp/tmp.GoTkx0dT0i ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 9 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ecMhn7tUxP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zxmim5nzJ1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ecMhn7tUxP ++ cat /tmp/tmp.Zxmim5nzJ1 ++ rm /tmp/tmp.ecMhn7tUxP /tmp/tmp.Zxmim5nzJ1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 10 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fbwLJTGNWL +++ mktemp ++ local LAST_ERR=/tmp/tmp.tk0EVLmv89 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fbwLJTGNWL ++ cat /tmp/tmp.tk0EVLmv89 ++ rm /tmp/tmp.fbwLJTGNWL /tmp/tmp.tk0EVLmv89 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 11 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q4aUjcckr6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.69y1Z8PAjs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.q4aUjcckr6 ++ cat /tmp/tmp.69y1Z8PAjs ++ rm /tmp/tmp.q4aUjcckr6 /tmp/tmp.69y1Z8PAjs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 12 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2o1kXpEcXs +++ mktemp ++ local LAST_ERR=/tmp/tmp.xlRziSNIsz ++ local exit_status=0 +++ seq 0 
2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2o1kXpEcXs ++ cat /tmp/tmp.xlRziSNIsz ++ rm /tmp/tmp.2o1kXpEcXs /tmp/tmp.xlRziSNIsz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 13 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nglSC51qp9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LHiVaTfJu7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nglSC51qp9 ++ cat /tmp/tmp.LHiVaTfJu7 ++ rm /tmp/tmp.nglSC51qp9 /tmp/tmp.LHiVaTfJu7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 14 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uSUHAzOPMo +++ mktemp ++ local LAST_ERR=/tmp/tmp.cY2DF7gTUI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uSUHAzOPMo ++ cat /tmp/tmp.cY2DF7gTUI ++ rm /tmp/tmp.uSUHAzOPMo /tmp/tmp.cY2DF7gTUI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 15 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qi98JkiGnk +++ mktemp ++ local LAST_ERR=/tmp/tmp.1zFXEBLsr3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Qi98JkiGnk ++ cat /tmp/tmp.1zFXEBLsr3 ++ rm /tmp/tmp.Qi98JkiGnk /tmp/tmp.1zFXEBLsr3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 16 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A1CIRqSrjM +++ mktemp ++ local LAST_ERR=/tmp/tmp.dRoIyhn2Gg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.A1CIRqSrjM ++ cat /tmp/tmp.dRoIyhn2Gg ++ rm /tmp/tmp.A1CIRqSrjM /tmp/tmp.dRoIyhn2Gg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 17 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kF69QqGviE +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Ak9YemfsO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kF69QqGviE ++ cat /tmp/tmp.1Ak9YemfsO ++ rm /tmp/tmp.kF69QqGviE /tmp/tmp.1Ak9YemfsO ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gePHnohqO8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ubIKi9s3B7 ++ 
local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gePHnohqO8 ++ cat /tmp/tmp.ubIKi9s3B7 ++ rm /tmp/tmp.gePHnohqO8 /tmp/tmp.ubIKi9s3B7 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-proxysql +++ local cluster_name=upgrade-proxysql ++++ get_proxy upgrade-proxysql ++++ local target_cluster=upgrade-proxysql +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.sptTqTcezm ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.IWL3CVwlM6 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.sptTqTcezm +++++ cat /tmp/tmp.IWL3CVwlM6 +++++ rm /tmp/tmp.sptTqTcezm /tmp/tmp.IWL3CVwlM6 +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6BAdMUTffR ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.unehcpe73f +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.6BAdMUTffR +++++ cat /tmp/tmp.unehcpe73f +++++ rm /tmp/tmp.6BAdMUTffR /tmp/tmp.unehcpe73f +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-proxysql-proxysql ++++ return +++ local cluster_proxy=upgrade-proxysql-proxysql +++ echo proxysql ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dFuVVOJWN0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.i6GWhNexGk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dFuVVOJWN0 ++ cat /tmp/tmp.i6GWhNexGk ++ rm /tmp/tmp.dFuVVOJWN0 /tmp/tmp.i6GWhNexGk ++ return 0 + [[ 3 == \3 ]] + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-0 condition met upgrade-proxysql-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-1 condition met upgrade-proxysql-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep 
'^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-2 condition met upgrade-proxysql-pxc-2.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6hUxS1Mc7b +++ mktemp ++ local LAST_ERR=/tmp/tmp.OiR1imGQiz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6hUxS1Mc7b ++ cat /tmp/tmp.OiR1imGQiz ++ rm /tmp/tmp.6hUxS1Mc7b /tmp/tmp.OiR1imGQiz ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gnsZK7BRyO +++ mktemp ++ local LAST_ERR=/tmp/tmp.qGGPxRZix8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gnsZK7BRyO ++ cat /tmp/tmp.qGGPxRZix8 ++ rm /tmp/tmp.gnsZK7BRyO /tmp/tmp.qGGPxRZix8 ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QiGqzXq8Zs +++ mktemp ++ local LAST_ERR=/tmp/tmp.HuoXwOYnMC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QiGqzXq8Zs ++ cat /tmp/tmp.HuoXwOYnMC ++ rm /tmp/tmp.QiGqzXq8Zs /tmp/tmp.HuoXwOYnMC ++ return 0 + client_pod=pxc-client-54c6d79f5-ml89d + wait_pod pxc-client-54c6d79f5-ml89d + local pod=pxc-client-54c6d79f5-ml89d + local max_retry=480 + local ns= ++ echo pxc-client-54c6d79f5-ml89d ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-54c6d79f5-ml89d condition met pxc-client-54c6d79f5-ml89d.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.L6uKB17ktu/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.L6uKB17ktu/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wb5sxRnH3E +++ mktemp ++ local LAST_ERR=/tmp/tmp.n6m7NbJ7VR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wb5sxRnH3E ++ cat /tmp/tmp.n6m7NbJ7VR ++ rm /tmp/tmp.wb5sxRnH3E /tmp/tmp.n6m7NbJ7VR ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 == perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OwR46KW2I7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qpJWwmp4Qy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OwR46KW2I7 ++ cat /tmp/tmp.qpJWwmp4Qy ++ rm /tmp/tmp.OwR46KW2I7 /tmp/tmp.qpJWwmp4Qy ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-proxysql == perconalab/percona-xtradb-cluster-operator:main-proxysql ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qUPK8tRMoe +++ mktemp ++ local LAST_ERR=/tmp/tmp.KoOvp0oGc0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qUPK8tRMoe ++ cat /tmp/tmp.KoOvp0oGc0 ++ rm /tmp/tmp.qUPK8tRMoe /tmp/tmp.KoOvp0oGc0 ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-haproxy == perconalab/percona-xtradb-cluster-operator:main-haproxy ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ySnwCxhXwF +++ mktemp ++ local LAST_ERR=/tmp/tmp.N9RGyiMtzF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ySnwCxhXwF ++ cat /tmp/tmp.N9RGyiMtzF ++ rm /tmp/tmp.ySnwCxhXwF /tmp/tmp.N9RGyiMtzF ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup == perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SA0ffSEbb5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iMFzbW4jQV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SA0ffSEbb5 ++ cat /tmp/tmp.iMFzbW4jQV ++ rm /tmp/tmp.SA0ffSEbb5 /tmp/tmp.iMFzbW4jQV ++ return 0 + [[ perconalab/pmm-client:dev-latest == perconalab/pmm-client:dev-latest ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6XSZimrcbY 
++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.6XSZimrcbY
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SxbMtjWNW1
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.6XSZimrcbY
++ cat /tmp/tmp.SxbMtjWNW1
++ rm /tmp/tmp.6XSZimrcbY /tmp/tmp.SxbMtjWNW1
++ return 0
+ [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 == perconalab/percona-xtradb-cluster-operator:main-pxc5.7 ]]
+ : Cluster images have been updated correctly
+ compare_generation 2 proxysql upgrade-proxysql
+ local generation=2
+ local proxy=proxysql
+ local cluster=upgrade-proxysql
+ local current_generation
+ [[ proxysql == \h\a\p\r\o\x\y ]]
+ containers=(pxc proxysql)
+ for container in '"${containers[@]}"'
++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Sgud5aHHvp
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Z5SwNXHvDI
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.Sgud5aHHvp
++ cat /tmp/tmp.Z5SwNXHvDI
++ rm /tmp/tmp.Sgud5aHHvp /tmp/tmp.Z5SwNXHvDI
++ return 0
+ current_generation=2
+ [[ 2 != \2 ]]
+ for container in '"${containers[@]}"'
++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.CayAvy8rys
+++ mktemp
++ local LAST_ERR=/tmp/tmp.D5MbLybsZZ
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.CayAvy8rys
++ cat /tmp/tmp.D5MbLybsZZ
++ rm /tmp/tmp.CayAvy8rys /tmp/tmp.D5MbLybsZZ
++ return 0
+ current_generation=2
+ [[ 2 != \2 ]]
+ compare_kubectl statefulset/upgrade-proxysql-pxc
+ local resource=statefulset/upgrade-proxysql-pxc
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc.yml
+ local new_result=/tmp/tmp.L6uKB17ktu/statefulset_upgrade-proxysql-pxc.yml
+ desc 'compare statefulset/upgrade-proxysql-pxc-'
+ set +o xtrace
-----------------------------------------------------------------------------------
compare statefulset/upgrade-proxysql-pxc-
-----------------------------------------------------------------------------------
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-eks.yml ']'
+ [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]]
+ version_gt 1.29
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.29'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
+ version_gt 1.27
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.27'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
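Every kubectl invocation in this trace runs through the suite's kubectl_bin wrapper: stdout and stderr are captured into mktemp files, the call is retried up to three times, and the captured streams are replayed and cleaned up afterwards. A reconstruction of that pattern from the xtrace output above (an approximation, not the suite's exact function; the sleep between attempts is an assumption):

kubectl_bin() {
  local LAST_OUT LAST_ERR exit_status=0
  LAST_OUT=$(mktemp)
  LAST_ERR=$(mktemp)
  for i in $(seq 0 2); do              # up to three attempts, as seen in the trace
    set +e
    kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
    exit_status=$?
    set -e
    [ "$exit_status" != 0 ] || break   # success: stop retrying
    sleep 1                            # assumed back-off; the real helper may differ
  done
  cat "$LAST_OUT"
  cat "$LAST_ERR" >&2
  rm "$LAST_OUT" "$LAST_ERR"
  return "$exit_status"
}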
+ version_gt 1.24
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.24'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k124.yml ']'
+ version_gt 1.22
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k122.yml ']'
+ version_gt 1.21
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.21'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k121.yml ']'
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-oc.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-eks.yml ']'
+ kubectl_bin get -o yaml statefulset/upgrade-proxysql-pxc
++ mktemp
+ yq eval '
  del(.metadata.managedFields)
  | del(.. | select(has("creationTimestamp")).creationTimestamp)
  | del(.. | select(has("namespace")).namespace)
  | del(.. | select(has("uid")).uid)
  | del(.metadata.resourceVersion)
  | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH"))
  | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH"))
  | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH"))
  | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL"))
  | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME"))
  | del(.metadata.selfLink)
  | del(.metadata.deletionTimestamp)
  | del(.metadata.annotations."kubernetes.io/psp")
  | del(.metadata.annotations."batch.kubernetes.io/job-tracking")
  | del(.metadata.annotations."cloud.google.com/neg")
  | del(.metadata.annotations."k8s.v1.cni.cncf.io*")
  | del(.metadata.annotations."k8s.ovn.org/pod-networks")
  | del(.spec.template.metadata.annotations."last-applied-secret")
  | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid")
  | del(.. | select(has("image")).image)
  | del(.. | select(has("clusterIP")).clusterIP)
  | del(.. | select(has("clusterIPs")).clusterIPs)
  | del(.. | select(has("dataSource")).dataSource)
  | del(.. | select(has("procMount")).procMount)
  | del(.. | select(has("storageClassName")).storageClassName)
  | del(.. | select(has("finalizers")).finalizers)
  | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection")
  | del(.. | select(has("volumeName")).volumeName)
  | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner")
  | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner")
  | del(.spec.volumeMode)
  | del(.spec.nodeName)
  | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node")
  | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash")
  | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash")
  | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash")
  | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash")
  | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash")
  | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem"))
  | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort)
  | del(.. | select(has("nodePort")).nodePort)
  | del(.. | select(has("imagePullSecrets")).imagePullSecrets)
  | del(.. | select(has("enableServiceLinks")).enableServiceLinks)
  | del(.status)
  | del(.spec.volumeClaimTemplates[].apiVersion)
  | del(.spec.volumeClaimTemplates[].kind)
  | del(.metadata.ownerReferences[].apiVersion)
  | del(.. | select(has("controller-uid")).controller-uid)
  | del(.. | select(has("preemptionPolicy")).preemptionPolicy)
  | del(.spec.ipFamilies)
  | del(.spec.ipFamilyPolicy)
  | (.. | select(. == "policy/v1beta1")) = "policy/v1"
  | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname")
  | (.. | select(tag == "!!str")) |= sub("upgrade-proxysql-23780", "namespace")
  | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access")
  | del(.. | select(has("annotations")).annotations | select(length==0))
  | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' -
+ local LAST_OUT=/tmp/tmp.wUbbUE29QB
++ mktemp
+ local LAST_ERR=/tmp/tmp.uJJRbAPWcx
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/upgrade-proxysql-pxc
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.wUbbUE29QB
+ cat /tmp/tmp.uJJRbAPWcx
+ rm /tmp/tmp.wUbbUE29QB /tmp/tmp.uJJRbAPWcx
+ return 0
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc.yml /tmp/tmp.L6uKB17ktu/statefulset_upgrade-proxysql-pxc.yml
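compare_kubectl is a fetch-normalize-diff step: the live object is dumped as YAML, the yq filter above strips every environment-specific field (UIDs, timestamps, images, IPs, generated namespace names, provisioner annotations), and the result is diffed against a checked-in expectation, so an empty diff means the spec matches. A condensed sketch of the pattern under the same assumptions (the real filter deletes many more fields than shown, and the paths here are placeholders):

# Sketch: normalize a live object, then diff it against the stored expectation.
resource=statefulset/upgrade-proxysql-pxc
expected=/path/to/compare/statefulset_upgrade-proxysql-pxc.yml   # placeholder path
kubectl get -o yaml "$resource" \
  | yq eval '
      del(.metadata.managedFields)
      | del(.. | select(has("uid")).uid)
      | del(.. | select(has("creationTimestamp")).creationTimestamp)
      | del(.. | select(has("image")).image)
      | del(.status)' - > /tmp/normalized.yml
diff -u "$expected" /tmp/normalized.yml   # empty output == object matches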
+ compare_kubectl statefulset/upgrade-proxysql-proxysql
+ local resource=statefulset/upgrade-proxysql-proxysql
+ local postfix=
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql.yml
+ local new_result=/tmp/tmp.L6uKB17ktu/statefulset_upgrade-proxysql-proxysql.yml
+ desc 'compare statefulset/upgrade-proxysql-proxysql-'
+ set +o xtrace
-----------------------------------------------------------------------------------
compare statefulset/upgrade-proxysql-proxysql-
-----------------------------------------------------------------------------------
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-eks.yml ']'
+ [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]]
+ version_gt 1.29
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.29'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
+ version_gt 1.27
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.27'
++ bc -l
+ '[' 0 -eq 1 ']'
+ return 1
+ version_gt 1.24
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.24'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k124.yml ']'
+ version_gt 1.22
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k122.yml ']'
+ version_gt 1.21
+ desc 'return true if kubernetes version equal or greater than desired'
+ set +o xtrace
-----------------------------------------------------------------------------------
return true if kubernetes version equal or greater than desired
-----------------------------------------------------------------------------------
++ echo '1.26 >= 1.21'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k121.yml ']'
+ '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-oc.yml ']'
+ '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-eks.yml ']'
+ kubectl_bin get -o yaml statefulset/upgrade-proxysql-proxysql
++ mktemp
+ yq eval '<same normalization filter as applied to statefulset/upgrade-proxysql-pxc above>' -
+ local LAST_OUT=/tmp/tmp.vG0LAQ3EFQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.zcfGvx2xiF
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/upgrade-proxysql-proxysql
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.vG0LAQ3EFQ
+ cat /tmp/tmp.zcfGvx2xiF
+ rm /tmp/tmp.vG0LAQ3EFQ /tmp/tmp.zcfGvx2xiF
+ return 0
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql.yml /tmp/tmp.L6uKB17ktu/statefulset_upgrade-proxysql-proxysql.yml
+ desc cleanup
+ set +o xtrace
-----------------------------------------------------------------------------------
cleanup
-----------------------------------------------------------------------------------
+ destroy upgrade-proxysql-23780
+ local namespace=upgrade-proxysql-23780
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false -o 1 == 1 ']'
+ grep -v level=info
+ grep -v 'the object has been modified'
+ grep -v 'get backup status: Job.batch'
+ sort -u
++ get_operator_pod
+ /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g'
++ local label_prefix=app.kubernetes.io/
+ tee /tmp/tmp.L6uKB17ktu/operator.log
+++ grep -c percona-xtradb-cluster-operator
+++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator
++ local check_label=1
++ [[ 1 -eq 0 ]]
++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.OYmyyaVmxs
+++ mktemp
++ local LAST_ERR=/tmp/tmp.cyJSwzMjCu
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.OYmyyaVmxs
++ cat /tmp/tmp.cyJSwzMjCu
++ rm /tmp/tmp.OYmyyaVmxs /tmp/tmp.cyJSwzMjCu
++ return 0
+ kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-6dcb8b6685-qhfb4
++ mktemp
+ local LAST_OUT=/tmp/tmp.f0j7NXP1SG
++ mktemp
+ local LAST_ERR=/tmp/tmp.mNwMuI43bF
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl logs -n pxc-operator percona-xtradb-cluster-operator-6dcb8b6685-qhfb4
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.f0j7NXP1SG
+ cat /tmp/tmp.mNwMuI43bF
+ rm /tmp/tmp.f0j7NXP1SG /tmp/tmp.mNwMuI43bF
+ return 0
2024-06-28T09:12:34.214Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.26.15-gke.1436000"}
2024-06-28T09:12:34.215Z INFO setup Manager starting up {"gitCommit": "0a840b68490b2f8881fb749474303f7fb8a1239d", "gitBranch": "PR-1740-0a840b68", "buildTime": "2024-06-28T06:58:02Z", "goVersion": "go1.22.4", "os": "linux", "arch": "amd64"}
2024-06-28T09:12:34.215Z INFO setup Registering Components.
2024-06-28T09:12:36.916Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"}
2024-06-28T09:12:37.009Z INFO setup Starting the Cmd.
2024-06-28T09:12:37.010Z INFO controller-runtime.certwatcher Updated current TLS certificate
2024-06-28T09:12:37.010Z INFO controller-runtime.metrics Starting metrics server
2024-06-28T09:12:37.010Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443}
2024-06-28T09:12:37.010Z INFO controller-runtime.webhook Starting webhook server
2024-06-28T09:12:37.010Z INFO starting server {"name": "health probe", "addr": "[::]:8081"}
2024-06-28T09:12:37.011Z INFO controller-runtime.certwatcher Starting certificate watcher
2024-06-28T09:12:37.011Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false}
2024-06-28T09:12:37.613Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com...
2024-06-28T09:12:55.849Z DEBUG events percona-xtradb-cluster-operator-6dcb8b6685-qhfb4_8de297dd-d297-4449-8670-7709b4885a4c became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"7199fc42-8b6f-45b8-9473-57e7fe72718b","apiVersion":"coordination.k8s.io/v1","resourceVersion":"72000"}, "reason": "LeaderElection"}
2024-06-28T09:12:55.849Z INFO Starting Controller {"controller": "pxcbackup-controller"}
2024-06-28T09:12:55.849Z INFO Starting Controller {"controller": "pxc-controller"}
2024-06-28T09:12:55.849Z INFO Starting Controller {"controller": "pxcrestore-controller"}
2024-06-28T09:12:55.849Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"}
2024-06-28T09:12:55.849Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"}
2024-06-28T09:12:55.849Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"}
2024-06-28T09:12:55.849Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com
2024-06-28T09:12:55.961Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1}
2024-06-28T09:12:55.961Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1}
2024-06-28T09:12:55.961Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1}
2024-06-28T09:12:57.640Z INFO Creating or updating backup job {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "066ee801-4872-44bc-81d3-0dd2aceacf94", "name": "4abb5-daily-backup", "schedule": "0 0 * * *"}
2024-06-28T09:12:59.546Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "066ee801-4872-44bc-81d3-0dd2aceacf94"}
2024-06-28T09:13:06.229Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "48ac32e0-9cd1-4ca8-8c0c-f9fd8ffaa1b5"}
2024-06-28T09:13:12.551Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "32d58bd4-eb12-4ae3-a941-3e705886ac41"}
2024-06-28T09:13:18.726Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "8624c5fb-b673-4a80-a048-b8f4f425ac15"}
2024-06-28T09:13:25.302Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e3e5631b-83f1-47b0-a3df-51aed27c4b5f"}
2024-06-28T09:13:31.740Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e0978cc7-b299-4474-88c0-9e5f32db6a68"}
2024-06-28T09:13:32.486Z INFO statefulSet was changed, run smart update {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88"}
2024-06-28T09:13:32.493Z INFO apply changes to secondary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-2"}
2024-06-28T09:13:32.493Z INFO primary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-0.upgrade-proxysql-pxc.upgrade-proxysql-23780.svc.cluster.local"}
2024-06-28T09:13:53.073Z INFO pod is waiting {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-2", "reason": "PodInitializing"}
2024-06-28T09:14:22.988Z INFO pod is running {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-2"}
2024-06-28T09:15:23.013Z INFO pod present in hostgroups {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-2"}
2024-06-28T09:15:23.021Z INFO apply changes to secondary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-1"}
2024-06-28T09:15:23.021Z INFO pod is online {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-2"}
2024-06-28T09:15:44.241Z INFO pod is waiting {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-1", "reason": "PodInitializing"}
2024-06-28T09:16:14.217Z INFO pod is running {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-1"}
2024-06-28T09:17:14.243Z INFO pod present in hostgroups {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-1"}
2024-06-28T09:17:14.251Z INFO apply changes to primary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-0"}
2024-06-28T09:17:14.251Z INFO pod is online {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-1"}
2024-06-28T09:18:04.527Z INFO pod is running {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-0"}
2024-06-28T09:19:24.551Z INFO pod present in hostgroups {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-0"}
2024-06-28T09:19:24.559Z INFO pod is online {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "pod name": "upgrade-proxysql-pxc-0"}
2024-06-28T09:19:24.560Z INFO smart update finished {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88"}
2024-06-28T09:19:24.639Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e6d173b4-5c62-4601-acf3-b60aeddc0f88", "new version": "5.7.44-48-57"}
2024-06-28T09:19:26.541Z INFO Waiting for ProxySQL to be ready before smart update {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "61d2352e-5116-4ed8-8cb3-2eaea7d1295d"}
2024-06-28T09:19:30.174Z INFO Waiting for ProxySQL to be ready before smart update {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "c2bfdc84-7b63-4270-b329-b9fdd005b2c7"}
2024-06-28T09:19:35.406Z INFO Waiting for ProxySQL to be ready before smart update {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "87fc17b9-ce37-42e2-921b-fb1f399b90a5"}
2024-06-28T09:20:11.897Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "e08bebbd-1e5b-406c-99bc-9a3e44123b6e"}
2024-06-28T09:20:16.568Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "28c1f3ba-99c2-45a3-b3fe-838839f9a5b6"}
2024-06-28T09:20:22.921Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "0fdc0106-e864-4a0c-ac94-607295429411"}
2024-06-28T09:20:29.210Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-23780", "name": "upgrade-proxysql", "reconcileID": "9402e999-9258-4f71-b7d4-474668859b82"}
"upgrade-proxysql", "reconcileID": "e982416c-ce76-4842-bfa4-156e9d4de6b9"} + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n upgrade-proxysql-23780 upgrade-proxysql --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/upgrade-proxysql patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.zkc5L28A0R ++ mktemp + local LAST_ERR=/tmp/tmp.wSVT7sWPbI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zkc5L28A0R perconaxtradbcluster.pxc.percona.com "upgrade-proxysql" deleted + cat /tmp/tmp.wSVT7sWPbI + rm /tmp/tmp.zkc5L28A0R /tmp/tmp.wSVT7sWPbI + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.42XfJiReOm ++ mktemp + local LAST_ERR=/tmp/tmp.hhDO4YQFTI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.42XfJiReOm No resources found + cat /tmp/tmp.hhDO4YQFTI + rm /tmp/tmp.42XfJiReOm /tmp/tmp.hhDO4YQFTI + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.qlPFlZvPDu ++ mktemp + local LAST_ERR=/tmp/tmp.sqR6KwKIbB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qlPFlZvPDu No resources found + cat /tmp/tmp.sqR6KwKIbB + rm /tmp/tmp.qlPFlZvPDu /tmp/tmp.sqR6KwKIbB + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.HO84TH0nk0 ++ mktemp + local LAST_ERR=/tmp/tmp.299OITr9DM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.HO84TH0nk0 validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.299OITr9DM + rm /tmp/tmp.HO84TH0nk0 /tmp/tmp.299OITr9DM + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml + : + '[' '!' 
+ '[' '!' -z '' ']'
+ '[' -n pxc-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace upgrade-proxysql-23780
+ rm -rf /tmp/tmp.L6uKB17ktu
+ kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.YhAk9r1DzV
++ mktemp
+ local LAST_OUT=/tmp/tmp.5MxQyv76Yr
++ kubectl get crd
++ mktemp
+ local LAST_ERR=/tmp/tmp.Ex4qySYTK4
+ local exit_status=0
++ grep perconaxtradbcluster
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.pTto7CM7d1
+ local exit_status=0
++ awk '{print $1}'
++ tr '\n' ' '
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace upgrade-proxysql-23780
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pxc-operator
+ kubectl delete crd perconaxtradbclusterbackups.pxc.percona.com perconaxtradbclusterrestores.pxc.percona.com perconaxtradbclusters.pxc.percona.com
customresourcedefinition.apiextensions.k8s.io "perconaxtradbclusterbackups.pxc.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaxtradbclusterrestores.pxc.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaxtradbclusters.pxc.percona.com" deleted
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
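The final teardown force-deletes both namespaces and removes the operator's CRDs by name match, leaving a clean API surface for the next run. The same steps as a standalone sketch (namespace names are this run's; CRD discovery mirrors the trace):

kubectl delete --grace-period=0 --force=true namespace upgrade-proxysql-23780
kubectl delete --grace-period=0 --force=true namespace pxc-operator
kubectl get crd | grep perconaxtradbcluster | awk '{print $1}' \
  | xargs kubectl delete crd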