Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/logs/upgrade-proxysql-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + CLUSTER=upgrade-proxysql + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.19.0 + TARGET_IMAGE=perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_PROXY=perconalab/percona-xtradb-cluster-operator:main-proxysql + TARGET_IMAGE_HAPROXY=perconalab/percona-xtradb-cluster-operator:main-haproxy + TARGET_IMAGE_BACKUP=perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup + TARGET_IMAGE_LOGCOLLECTOR=perconalab/percona-xtradb-cluster-operator:main-logcollector + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=8.0 ++ curl -s https://check.percona.com/versions/v1/pxc-operator ++ jq -r '.versions[].operator' ++ sort -V ++ tail -n1 + INIT_OPERATOR_VER=1.18.0 + [[ 1.18.0 == \1\.\1\9\.\0 ]] + GIT_TAG=v1.18.0 ++ curl -s 'https://check.percona.com/versions/v1/pxc-operator/1.18.0/latest?databaseVersion=8.0' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + OPERATOR_NAME=percona-xtradb-cluster-operator ++ jq -r '.versions[].matrix.operator[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE=percona/percona-xtradb-cluster-operator:1.18.0 ++ echo perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-xtradb-cluster-operator:1.18.0 ++ jq -r '.versions[].matrix.pxc[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.42-33.1 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' ++ tail -n1 + IMAGE_PMM_CLIENT=percona/pmm-client:3.3.1 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.proxysql[].imagePath' + IMAGE_PROXY=percona/proxysql2:2.7.3 ++ jq -r '.versions[].matrix.haproxy[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_HAPROXY=percona/haproxy:2.8.15 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.backup[].imagePath' + IMAGE_BACKUP=percona/percona-xtrabackup:8.0.35-34.1 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.logCollector[].imagePath' + IMAGE_LOGCOLLECTOR=percona/fluentbit:4.0.1 + [[ -n '' ]] + [[ 1.19.0 == \1\.\1\8\.\0 ]] + main + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.ni5gKtVoSj ++ mktemp + local LAST_ERR=/tmp/tmp.qSZFAoOLN0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ni5gKtVoSj namespace/cert-manager created + cat /tmp/tmp.qSZFAoOLN0 + rm /tmp/tmp.ni5gKtVoSj /tmp/tmp.qSZFAoOLN0 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.ybFJaAkRC7 ++ mktemp + local LAST_ERR=/tmp/tmp.X9MtucfAJE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ybFJaAkRC7 namespace/cert-manager labeled + cat /tmp/tmp.X9MtucfAJE + rm /tmp/tmp.ybFJaAkRC7 /tmp/tmp.X9MtucfAJE + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.2DsZNqVv8B ++ mktemp + local LAST_ERR=/tmp/tmp.Ujj2yKkhdF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2DsZNqVv8B namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.Ujj2yKkhdF Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.2DsZNqVv8B /tmp/tmp.Ujj2yKkhdF + return 0 + '[' '' == 4.10 ']' + sleep 70 + create_infra_gh upgrade-proxysql-18854 v1.18.0 + local ns=upgrade-proxysql-18854 + local git_tag=v1.18.0 + '[' -n pxc-operator ']' + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.oimtHl8czi ++ mktemp + local LAST_ERR=/tmp/tmp.mofGyKuvdY + local exit_status=0 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.LxzXY9112o ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.d8a45to8OT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oimtHl8czi + cat /tmp/tmp.mofGyKuvdY + rm /tmp/tmp.oimtHl8czi /tmp/tmp.mofGyKuvdY + return 0 namespace "cert-manager" deleted namespace "upgrade-proxysql-12351" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LxzXY9112o namespace "pxc-operator" deleted + cat /tmp/tmp.d8a45to8OT + rm /tmp/tmp.LxzXY9112o /tmp/tmp.d8a45to8OT + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.y5jLKrffvP ++ mktemp + local LAST_ERR=/tmp/tmp.QWW4TzgFb4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.y5jLKrffvP namespace/pxc-operator created + cat /tmp/tmp.QWW4TzgFb4 + rm /tmp/tmp.y5jLKrffvP /tmp/tmp.QWW4TzgFb4 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.zD53J9myuL +++ mktemp ++ local LAST_ERR=/tmp/tmp.On5evSDS2r ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zD53J9myuL ++ cat /tmp/tmp.On5evSDS2r ++ rm /tmp/tmp.zD53J9myuL /tmp/tmp.On5evSDS2r ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.73CnoRx4wf ++ mktemp + local LAST_ERR=/tmp/tmp.vFmGZ48Wmo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.73CnoRx4wf Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9" modified. + cat /tmp/tmp.vFmGZ48Wmo + rm /tmp/tmp.73CnoRx4wf /tmp/tmp.vFmGZ48Wmo + return 0 + deploy_operator_gh v1.18.0 + local git_tag=v1.18.0 + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- ++ kubectl_bin get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JE2sER8x5c +++ mktemp ++ local LAST_ERR=/tmp/tmp.OzWWu3qSyQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JE2sER8x5c ++ cat /tmp/tmp.OzWWu3qSyQ ++ rm /tmp/tmp.JE2sER8x5c /tmp/tmp.OzWWu3qSyQ ++ return 0 + [[ -n perconaxtradbclusters.pxc.percona.com ]] ++ kubectl_bin get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-18-0")].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.76gVI4hJmr +++ mktemp ++ local LAST_ERR=/tmp/tmp.9qy9jTnmy7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-18-0")].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.76gVI4hJmr ++ cat /tmp/tmp.9qy9jTnmy7 ++ rm /tmp/tmp.76gVI4hJmr /tmp/tmp.9qy9jTnmy7 ++ return 0 + [[ -n '' ]] + kubectl_bin apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ZIKtXBQaZr ++ mktemp + local LAST_ERR=/tmp/tmp.SXZL9He6Hw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZIKtXBQaZr + cat /tmp/tmp.SXZL9He6Hw + rm /tmp/tmp.ZIKtXBQaZr /tmp/tmp.SXZL9He6Hw + return 0 + local rbac_yaml=rbac + local operator_yaml=operator.yaml + '[' -n pxc-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator.yaml + apply_rbac_gh cw-rbac v1.18.0 + local operator_namespace=pxc-operator + local rbac=cw-rbac + local git_tag=v1.18.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cw-rbac.yaml + /usr/bin/sed -i -e 's^namespace: .*^namespace: pxc-operator^' /tmp/tmp.Wo8QKb5uOB/rbac_v1.18.0.yaml + kubectl_bin apply -f /tmp/tmp.Wo8QKb5uOB/rbac_v1.18.0.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.7nKJGZnVbv ++ mktemp + local LAST_ERR=/tmp/tmp.McImbBoNlJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.Wo8QKb5uOB/rbac_v1.18.0.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7nKJGZnVbv clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.McImbBoNlJ + rm /tmp/tmp.7nKJGZnVbv /tmp/tmp.McImbBoNlJ + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cw-operator.yaml + cat /tmp/tmp.Wo8QKb5uOB/cw-operator.yaml_v1.18.0.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:1.18.0^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' + kubectl_bin apply -n pxc-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CU00GBqP2T ++ mktemp + local LAST_ERR=/tmp/tmp.cc8G8KxToG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CU00GBqP2T deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.cc8G8KxToG + rm /tmp/tmp.CU00GBqP2T /tmp/tmp.cc8G8KxToG + return 0 + sleep 2 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.YQl4YjkEHE +++ mktemp ++ local LAST_ERR=/tmp/tmp.wYUsNWAXRn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YQl4YjkEHE ++ cat /tmp/tmp.wYUsNWAXRn ++ rm /tmp/tmp.YQl4YjkEHE /tmp/tmp.wYUsNWAXRn ++ return 0 + wait_pod percona-xtradb-cluster-operator-85f65db574-fn2sw + local pod=percona-xtradb-cluster-operator-85f65db574-fn2sw + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-85f65db574-fn2sw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-85f65db574-fn2sw condition met waiting for pod/percona-xtradb-cluster-operator-85f65db574-fn2sw to become Ready.Ok + create_namespace upgrade-proxysql-18854 + local namespace=upgrade-proxysql-18854 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-proxysql-18854' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-proxysql-18854 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-proxysql-18854 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.A7ww94jmxp ++ mktemp + local LAST_OUT=/tmp/tmp.nnlk0R9wrf ++ mktemp + local LAST_ERR=/tmp/tmp.b0HDDW88zV + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.xXi1GtLgMW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-proxysql-18854 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-proxysql-18854 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.A7ww94jmxp + cat /tmp/tmp.b0HDDW88zV + rm /tmp/tmp.A7ww94jmxp /tmp/tmp.b0HDDW88zV + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-proxysql-18854 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.nnlk0R9wrf + cat /tmp/tmp.xXi1GtLgMW Error from server (NotFound): namespaces "upgrade-proxysql-18854" not found + rm /tmp/tmp.nnlk0R9wrf /tmp/tmp.xXi1GtLgMW + return 1 + : + wait_for_delete namespace/upgrade-proxysql-18854 + local res=namespace/upgrade-proxysql-18854 + echo -n 'waiting for namespace/upgrade-proxysql-18854 to be deleted' waiting for namespace/upgrade-proxysql-18854 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "upgrade-proxysql-18854" not found + desc 'create namespace upgrade-proxysql-18854' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-proxysql-18854 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-proxysql-18854 ++ mktemp + local LAST_OUT=/tmp/tmp.Fj5iSctzBC ++ mktemp + local LAST_ERR=/tmp/tmp.Sl5q6M9Roz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-proxysql-18854 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Fj5iSctzBC namespace/upgrade-proxysql-18854 created + cat /tmp/tmp.Sl5q6M9Roz + rm /tmp/tmp.Fj5iSctzBC /tmp/tmp.Sl5q6M9Roz + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.SzQ6f9nnyC +++ mktemp ++ local LAST_ERR=/tmp/tmp.RrExeeAXOC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SzQ6f9nnyC ++ cat /tmp/tmp.RrExeeAXOC ++ rm /tmp/tmp.SzQ6f9nnyC /tmp/tmp.RrExeeAXOC ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=upgrade-proxysql-18854 ++ mktemp + local LAST_OUT=/tmp/tmp.QPmYoHJFGl ++ mktemp + local LAST_ERR=/tmp/tmp.u4RxxWMfzl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=upgrade-proxysql-18854 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QPmYoHJFGl Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9" modified. + cat /tmp/tmp.u4RxxWMfzl + rm /tmp/tmp.QPmYoHJFGl /tmp/tmp.u4RxxWMfzl + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.O1zJq8xvfr ++ mktemp + local LAST_ERR=/tmp/tmp.Ze9nOwkMnZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.O1zJq8xvfr secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.Ze9nOwkMnZ + rm /tmp/tmp.O1zJq8xvfr /tmp/tmp.Ze9nOwkMnZ + return 0 + local proxy=proxysql + local cr_yaml=/tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml + prepare_cr_yaml /tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml proxysql upgrade-proxysql 3 v1.18.0 + local cr_yaml=/tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml + local proxy=proxysql + local cluster=upgrade-proxysql + local cluster_size=3 + local git_tag=v1.18.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-proxysql" | .spec.secretsName = "my-cluster-secrets" | .spec.vaultSecretName = "some-name-vault" | .spec.sslSecretName = "some-name-ssl" | .spec.sslInternalSecretName = "some-name-ssl-internal" | .spec.upgradeOptions.apply = "disabled" | .spec.pxc.size = 3 | .spec.proxysql.size = 3 | .spec.haproxy.size = 3 | .spec.pxc.image = "-pxc" | .spec.proxysql.image = "-proxysql" | .spec.haproxy.image = "-haproxy" | .spec.backup.image = "-backup" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service.#namespace:9000/" | .spec.backup.storages.minio.type = "s3" | .spec.pmm.image = "-pmm" ' - + [[ proxysql == \h\a\p\r\o\x\y ]] + yq -i eval ' .spec.haproxy.enabled = false | .spec.proxysql.enabled = true ' /tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml + [[ -n '' ]] + spinup_pxc upgrade-proxysql /tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml 3 30 /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/secrets_without_tls.yml + local cluster=upgrade-proxysql + local config=/tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml + local size=3 + local sleep=30 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MYTTt86fej ++ mktemp + local LAST_ERR=/tmp/tmp.b4Sno8AA0Q + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MYTTt86fej secret/my-cluster-secrets created + cat /tmp/tmp.b4Sno8AA0Q + rm /tmp/tmp.MYTTt86fej /tmp/tmp.b4Sno8AA0Q + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + local LAST_OUT=/tmp/tmp.mLV63piBtt + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:3.3.1#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.15#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.18.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.3#' + local LAST_ERR=/tmp/tmp.9wRPVmpZMp + local exit_status=0 + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtrabackup:8.0.35-34.1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: percona/fluentbit:4.0.1#' ++ seq 0 2 + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-proxysql-18854~ + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mLV63piBtt deployment.apps/pxc-client created + cat /tmp/tmp.9wRPVmpZMp + rm /tmp/tmp.mLV63piBtt /tmp/tmp.9wRPVmpZMp + return 0 + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + apply_config /tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml + cat /tmp/tmp.Wo8QKb5uOB/cr_1.18.0_proxysql.yaml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.18.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:3.3.1#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtrabackup:8.0.35-34.1#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.3#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.15#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: percona/fluentbit:4.0.1#' + local LAST_OUT=/tmp/tmp.lEzH9fSxkm + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' ++ mktemp + local LAST_ERR=/tmp/tmp.9Gt9z7Yj1h + local exit_status=0 + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-proxysql-18854~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lEzH9fSxkm perconaxtradbcluster.pxc.percona.com/upgrade-proxysql created + cat /tmp/tmp.9Gt9z7Yj1h + rm /tmp/tmp.lEzH9fSxkm /tmp/tmp.9Gt9z7Yj1h + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy upgrade-proxysql ++ local target_cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DUu2yCaOFY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oYHiABh17k +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.DUu2yCaOFY +++ cat /tmp/tmp.oYHiABh17k +++ rm /tmp/tmp.DUu2yCaOFY /tmp/tmp.oYHiABh17k +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hR9oa1u0k1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yf1acZVS6t +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.hR9oa1u0k1 +++ cat /tmp/tmp.yf1acZVS6t +++ rm /tmp/tmp.hR9oa1u0k1 /tmp/tmp.yf1acZVS6t +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo upgrade-proxysql-proxysql ++ return + local proxy=upgrade-proxysql-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-18854 ++ mktemp + local LAST_OUT=/tmp/tmp.BxF6MyXzgt ++ mktemp + local LAST_ERR=/tmp/tmp.QZWfoABnzb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-18854 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-18854 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-proxysql-18854 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.BxF6MyXzgt + cat /tmp/tmp.QZWfoABnzb error: no matching resources found + rm /tmp/tmp.BxF6MyXzgt /tmp/tmp.QZWfoABnzb + return 1 + true + wait_for_running upgrade-proxysql-proxysql 1 + local name=upgrade-proxysql-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-proxysql-0 480 + local pod=upgrade-proxysql-proxysql-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace pod/upgrade-proxysql-proxysql-0 condition met waiting for pod/upgrade-proxysql-proxysql-0 to become Ready.Ok + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-0 condition met waiting for pod/upgrade-proxysql-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-1 condition met waiting for pod/upgrade-proxysql-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-2 condition met waiting for pod/upgrade-proxysql-pxc-2 to become Ready.Ok + sleep 30 ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.sHyxGmrcJd +++ mktemp ++ local LAST_ERR=/tmp/tmp.p5qkW1B1I1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sHyxGmrcJd ++ cat /tmp/tmp.p5qkW1B1I1 ++ rm /tmp/tmp.sHyxGmrcJd /tmp/tmp.p5qkW1B1I1 ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h upgrade-proxysql-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h upgrade-proxysql-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RkerOI25ju +++ mktemp ++ local LAST_ERR=/tmp/tmp.ovdzY6bTyp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RkerOI25ju ++ cat /tmp/tmp.ovdzY6bTyp ++ rm /tmp/tmp.RkerOI25ju /tmp/tmp.ovdzY6bTyp ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h upgrade-proxysql-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h upgrade-proxysql-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cpVD3IzJB3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZIQq1dZvxc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cpVD3IzJB3 ++ cat /tmp/tmp.ZIQq1dZvxc ++ rm /tmp/tmp.cpVD3IzJB3 /tmp/tmp.ZIQq1dZvxc ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NsLrcXr6bu +++ mktemp ++ local LAST_ERR=/tmp/tmp.dmKiFFrNEi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NsLrcXr6bu ++ cat /tmp/tmp.dmKiFFrNEi ++ rm /tmp/tmp.NsLrcXr6bu /tmp/tmp.dmKiFFrNEi ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.afdHLxmskf +++ mktemp ++ local LAST_ERR=/tmp/tmp.70ibWlh6Hn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.afdHLxmskf ++ cat /tmp/tmp.70ibWlh6Hn ++ rm /tmp/tmp.afdHLxmskf /tmp/tmp.70ibWlh6Hn ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IcdYmZOvVe +++ mktemp ++ local LAST_ERR=/tmp/tmp.XqdJoDR7FZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IcdYmZOvVe ++ cat /tmp/tmp.XqdJoDR7FZ ++ rm /tmp/tmp.IcdYmZOvVe /tmp/tmp.XqdJoDR7FZ ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql ++ is_keyring_plugin_in_use upgrade-proxysql ++ local cluster=upgrade-proxysql ++ kubectl_bin exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q8ORX5uXK7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.H5k0d0SUlW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Q8ORX5uXK7 ++ cat /tmp/tmp.H5k0d0SUlW Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.Q8ORX5uXK7 /tmp/tmp.H5k0d0SUlW ++ return 0 + '[' '' ']' + compare_generation 1 proxysql upgrade-proxysql + local generation=1 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' + check_generation 1 pxc upgrade-proxysql + local generation=1 + local container=pxc + local cluster=upgrade-proxysql + local current_generation ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bkBwgFjHgX +++ mktemp ++ local LAST_ERR=/tmp/tmp.FeW0wSmlJt ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bkBwgFjHgX ++ cat /tmp/tmp.FeW0wSmlJt ++ rm /tmp/tmp.bkBwgFjHgX /tmp/tmp.FeW0wSmlJt ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' + check_generation 1 proxysql upgrade-proxysql + local generation=1 + local container=proxysql + local cluster=upgrade-proxysql + local current_generation ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Amib9y0RV +++ mktemp ++ local LAST_ERR=/tmp/tmp.3V7dX1IlTA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2Amib9y0RV ++ cat /tmp/tmp.3V7dX1IlTA ++ rm /tmp/tmp.2Amib9y0RV /tmp/tmp.3V7dX1IlTA ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hrCqTZi38r ++ mktemp + local LAST_ERR=/tmp/tmp.IrG3MtiC1J + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hrCqTZi38r customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.IrG3MtiC1J + rm /tmp/tmp.hrCqTZi38r /tmp/tmp.IrG3MtiC1J + return 0 + [[ -n pxc-operator ]] + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.6hdY9DQBup ++ mktemp + local LAST_ERR=/tmp/tmp.wK7ay07Xev + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6hdY9DQBup clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.wK7ay07Xev + rm /tmp/tmp.6hdY9DQBup /tmp/tmp.wK7ay07Xev + return 0 + kubectl_bin patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f"}]}}}}' -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.EUW1SgVe5n ++ mktemp + local LAST_ERR=/tmp/tmp.As3ILi6FYQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f"}]}}}}' -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EUW1SgVe5n deployment.apps/percona-xtradb-cluster-operator patched + cat /tmp/tmp.As3ILi6FYQ + rm /tmp/tmp.EUW1SgVe5n /tmp/tmp.As3ILi6FYQ + return 0 + kubectl_bin rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.wocl74kkUX ++ mktemp + local LAST_ERR=/tmp/tmp.Msm14y8JIK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wocl74kkUX Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 out of 1 new replicas have been updated... Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 of 1 updated replicas are available... deployment "percona-xtradb-cluster-operator" successfully rolled out + cat /tmp/tmp.Msm14y8JIK + rm /tmp/tmp.wocl74kkUX /tmp/tmp.Msm14y8JIK + return 0 + sleep 10 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- + local i=0 + local max=60 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vJarIGp5ZM +++ mktemp ++ local LAST_ERR=/tmp/tmp.R6r06l2gJO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vJarIGp5ZM ++ cat /tmp/tmp.R6r06l2gJO ++ rm /tmp/tmp.vJarIGp5ZM /tmp/tmp.R6r06l2gJO ++ return 0 + [[ 1 -eq 1 ]] + '[' -n pxc-operator ']' ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.R1EEMqRQsM +++ mktemp ++ local LAST_ERR=/tmp/tmp.CyBDmYkj6k ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.R1EEMqRQsM ++ cat /tmp/tmp.CyBDmYkj6k ++ rm /tmp/tmp.R1EEMqRQsM /tmp/tmp.CyBDmYkj6k ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mWX4Vg7Dz4 ++ mktemp + local LAST_ERR=/tmp/tmp.X3vJlWrUML + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mWX4Vg7Dz4 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9" modified. + cat /tmp/tmp.X3vJlWrUML + rm /tmp/tmp.mWX4Vg7Dz4 /tmp/tmp.X3vJlWrUML + return 0 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ETBiXw2Q1D +++ mktemp ++ local LAST_ERR=/tmp/tmp.rHJKd54nPZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ETBiXw2Q1D ++ cat /tmp/tmp.rHJKd54nPZ ++ rm /tmp/tmp.ETBiXw2Q1D /tmp/tmp.rHJKd54nPZ ++ return 0 + wait_pod percona-xtradb-cluster-operator-5fc77b6d5-dbv72 + local pod=percona-xtradb-cluster-operator-5fc77b6d5-dbv72 + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-5fc77b6d5-dbv72 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-5fc77b6d5-dbv72 condition met waiting for pod/percona-xtradb-cluster-operator-5fc77b6d5-dbv72 to become Ready.Ok ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.BwrYN30hSd +++ mktemp ++ local LAST_ERR=/tmp/tmp.fGnIxEsSL1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BwrYN30hSd ++ cat /tmp/tmp.fGnIxEsSL1 ++ rm /tmp/tmp.BwrYN30hSd /tmp/tmp.fGnIxEsSL1 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=upgrade-proxysql-18854 ++ mktemp + local LAST_OUT=/tmp/tmp.6U6jis0Zc2 ++ mktemp + local LAST_ERR=/tmp/tmp.Ex8GYPkAgr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9 --namespace=upgrade-proxysql-18854 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6U6jis0Zc2 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-0538614f-6-cluster9" modified. + cat /tmp/tmp.Ex8GYPkAgr + rm /tmp/tmp.6U6jis0Zc2 /tmp/tmp.Ex8GYPkAgr + return 0 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-proxysql 3 + local cluster=upgrade-proxysql + local cluster_size=3 + wait_cluster_consistency upgrade-proxysql 3 + local cluster_name=upgrade-proxysql + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-proxysql ++ local cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xFmXjCBZs5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nuyfrCdmZ2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.xFmXjCBZs5 +++ cat /tmp/tmp.nuyfrCdmZ2 +++ rm /tmp/tmp.xFmXjCBZs5 /tmp/tmp.nuyfrCdmZ2 +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cTMwoQASrF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cIqBnNRhbI +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.cTMwoQASrF +++ cat /tmp/tmp.cIqBnNRhbI +++ rm /tmp/tmp.cTMwoQASrF /tmp/tmp.cIqBnNRhbI +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O08H7cEPg4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.I5Ni7XM94Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.O08H7cEPg4 ++ cat /tmp/tmp.I5Ni7XM94Y ++ rm /tmp/tmp.O08H7cEPg4 /tmp/tmp.I5Ni7XM94Y ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/upgrade-proxysql to be ready' waiting for pxc/upgrade-proxysql to be ready++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QKzdKUGyra +++ mktemp ++ local LAST_ERR=/tmp/tmp.GJz1e09NwZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QKzdKUGyra ++ cat /tmp/tmp.GJz1e09NwZ ++ rm /tmp/tmp.QKzdKUGyra /tmp/tmp.GJz1e09NwZ ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x2kovqAxs8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XbhagMzLCV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.x2kovqAxs8 ++ cat /tmp/tmp.XbhagMzLCV ++ rm /tmp/tmp.x2kovqAxs8 /tmp/tmp.XbhagMzLCV ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-proxysql +++ local cluster_name=upgrade-proxysql ++++ get_proxy upgrade-proxysql ++++ local target_cluster=upgrade-proxysql +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Hotyk2BT6V ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.sVEWF8LCzU +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.Hotyk2BT6V +++++ cat /tmp/tmp.sVEWF8LCzU +++++ rm /tmp/tmp.Hotyk2BT6V /tmp/tmp.sVEWF8LCzU +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.GPRXCHVKGu ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.PjUOc5Rbio +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.GPRXCHVKGu +++++ cat /tmp/tmp.PjUOc5Rbio +++++ rm /tmp/tmp.GPRXCHVKGu /tmp/tmp.PjUOc5Rbio +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-proxysql-proxysql ++++ return +++ local cluster_proxy=upgrade-proxysql-proxysql +++ echo proxysql ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Pu8SHJKCeu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ty8oZcMYoC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Pu8SHJKCeu ++ cat /tmp/tmp.Ty8oZcMYoC ++ rm /tmp/tmp.Pu8SHJKCeu /tmp/tmp.Ty8oZcMYoC ++ return 0 + [[ 3 == \3 ]] + echo + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-0 condition met waiting for pod/upgrade-proxysql-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-1 condition met waiting for pod/upgrade-proxysql-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-2 condition met waiting for pod/upgrade-proxysql-pxc-2 to become Ready.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.18mzPtvLNg +++ mktemp ++ local LAST_ERR=/tmp/tmp.CJzFBCfCa4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.18mzPtvLNg ++ cat /tmp/tmp.CJzFBCfCa4 ++ rm /tmp/tmp.18mzPtvLNg /tmp/tmp.CJzFBCfCa4 ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IMAvQr6dIx +++ mktemp ++ local LAST_ERR=/tmp/tmp.fQj2lgE3u6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IMAvQr6dIx ++ cat /tmp/tmp.fQj2lgE3u6 ++ rm /tmp/tmp.IMAvQr6dIx /tmp/tmp.fQj2lgE3u6 ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wqyVKry4hr +++ mktemp ++ local LAST_ERR=/tmp/tmp.jHo548IZlC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wqyVKry4hr ++ cat /tmp/tmp.jHo548IZlC ++ rm /tmp/tmp.wqyVKry4hr /tmp/tmp.jHo548IZlC ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9CBJHpaXTE +++ mktemp ++ local LAST_ERR=/tmp/tmp.g4UTisc84d ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9CBJHpaXTE ++ cat /tmp/tmp.g4UTisc84d ++ rm /tmp/tmp.9CBJHpaXTE /tmp/tmp.g4UTisc84d ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f == perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fH7xhbfhkr +++ mktemp ++ local LAST_ERR=/tmp/tmp.p6QwxJSYG2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fH7xhbfhkr ++ cat /tmp/tmp.p6QwxJSYG2 ++ rm /tmp/tmp.fH7xhbfhkr /tmp/tmp.p6QwxJSYG2 ++ return 0 + [[ percona/proxysql2:2.7.3 == percona/proxysql2:2.7.3 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JZLGJ8Hqns +++ mktemp ++ local LAST_ERR=/tmp/tmp.zbf4jiXHjn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JZLGJ8Hqns ++ cat /tmp/tmp.zbf4jiXHjn ++ rm /tmp/tmp.JZLGJ8Hqns /tmp/tmp.zbf4jiXHjn ++ return 0 + [[ percona/haproxy:2.8.15 == percona/haproxy:2.8.15 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xDU3WA2JDi +++ mktemp ++ local LAST_ERR=/tmp/tmp.jGRpX1eUeR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xDU3WA2JDi ++ cat /tmp/tmp.jGRpX1eUeR ++ rm /tmp/tmp.xDU3WA2JDi /tmp/tmp.jGRpX1eUeR ++ return 0 + [[ percona/percona-xtrabackup:8.0.35-34.1 == percona/percona-xtrabackup:8.0.35-34.1 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UNFWzYwax2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.woRZ14bflq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UNFWzYwax2 ++ cat /tmp/tmp.woRZ14bflq ++ rm /tmp/tmp.UNFWzYwax2 /tmp/tmp.woRZ14bflq ++ return 0 + [[ percona/pmm-client:3.3.1 == percona/pmm-client:3.3.1 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.logcollector.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cI2q5piQCL +++ mktemp ++ local LAST_ERR=/tmp/tmp.i0g5wZOYmQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.logcollector.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cI2q5piQCL ++ cat /tmp/tmp.i0g5wZOYmQ ++ rm /tmp/tmp.cI2q5piQCL /tmp/tmp.i0g5wZOYmQ ++ return 0 + [[ percona/fluentbit:4.0.1 == percona/fluentbit:4.0.1 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LvTr7I7C5P +++ mktemp ++ local LAST_ERR=/tmp/tmp.8JeOxoOngj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LvTr7I7C5P ++ cat /tmp/tmp.8JeOxoOngj ++ rm /tmp/tmp.LvTr7I7C5P /tmp/tmp.8JeOxoOngj ++ return 0 + [[ percona/percona-xtradb-cluster:8.0.42-33.1 == percona/percona-xtradb-cluster:8.0.42-33.1 ]] + : Operator image has been updated correctly + compare_generation 1 proxysql upgrade-proxysql + local generation=1 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' + check_generation 1 pxc upgrade-proxysql + local generation=1 + local container=pxc + local cluster=upgrade-proxysql + local current_generation ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vJEGinU3lE +++ mktemp ++ local LAST_ERR=/tmp/tmp.XciLGTBHBA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vJEGinU3lE ++ cat /tmp/tmp.XciLGTBHBA ++ rm /tmp/tmp.vJEGinU3lE /tmp/tmp.XciLGTBHBA ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' + check_generation 1 proxysql upgrade-proxysql + local generation=1 + local container=proxysql + local cluster=upgrade-proxysql + local current_generation ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4ZjPKuaUQH +++ mktemp ++ local LAST_ERR=/tmp/tmp.6bWMxqDzms ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4ZjPKuaUQH ++ cat /tmp/tmp.6bWMxqDzms ++ rm /tmp/tmp.4ZjPKuaUQH /tmp/tmp.6bWMxqDzms ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch pxc images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch pxc images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch pxc upgrade-proxysql --type=merge --patch '{ "spec": { "crVersion": "1.19.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "logcollector": { "image": "perconalab/percona-xtradb-cluster-operator:main-logcollector" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.BEdIee8yhN ++ mktemp + local LAST_ERR=/tmp/tmp.2WLWujigpX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc upgrade-proxysql --type=merge --patch '{ "spec": { "crVersion": "1.19.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "logcollector": { "image": "perconalab/percona-xtradb-cluster-operator:main-logcollector" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BEdIee8yhN perconaxtradbcluster.pxc.percona.com/upgrade-proxysql patched + cat /tmp/tmp.2WLWujigpX + rm /tmp/tmp.BEdIee8yhN /tmp/tmp.2WLWujigpX + return 0 + sleep 10 + desc 'check images and generation after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after full upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-proxysql 3 + local cluster=upgrade-proxysql + local cluster_size=3 + wait_cluster_consistency upgrade-proxysql 3 + local cluster_name=upgrade-proxysql + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-proxysql ++ local cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9mQty7wAup ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qNM6lah7kp +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.9mQty7wAup +++ cat /tmp/tmp.qNM6lah7kp +++ rm /tmp/tmp.9mQty7wAup /tmp/tmp.qNM6lah7kp +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YvvqBjJTZx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1IQPTFcSyw +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.YvvqBjJTZx +++ cat /tmp/tmp.1IQPTFcSyw +++ rm /tmp/tmp.YvvqBjJTZx /tmp/tmp.1IQPTFcSyw +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i8ReyYz1lK +++ mktemp ++ local LAST_ERR=/tmp/tmp.bQssf2cKbr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.i8ReyYz1lK ++ cat /tmp/tmp.bQssf2cKbr ++ rm /tmp/tmp.i8ReyYz1lK /tmp/tmp.bQssf2cKbr ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/upgrade-proxysql to be ready' waiting for pxc/upgrade-proxysql to be ready++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2RtdRUWtaT +++ mktemp ++ local LAST_ERR=/tmp/tmp.f6fV7WYFAH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2RtdRUWtaT ++ cat /tmp/tmp.f6fV7WYFAH ++ rm /tmp/tmp.2RtdRUWtaT /tmp/tmp.f6fV7WYFAH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.isrpgTur8Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.U9reZ3Zm5p ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.isrpgTur8Z ++ cat /tmp/tmp.U9reZ3Zm5p ++ rm /tmp/tmp.isrpgTur8Z /tmp/tmp.U9reZ3Zm5p ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bX8NutWoW3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.k8AKHT2buG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bX8NutWoW3 ++ cat /tmp/tmp.k8AKHT2buG ++ rm /tmp/tmp.bX8NutWoW3 /tmp/tmp.k8AKHT2buG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nEKRRi2Pog +++ mktemp ++ local LAST_ERR=/tmp/tmp.OmHqR7YZ9r ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nEKRRi2Pog ++ cat /tmp/tmp.OmHqR7YZ9r ++ rm /tmp/tmp.nEKRRi2Pog /tmp/tmp.OmHqR7YZ9r ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FlMY7njAnk +++ mktemp ++ local LAST_ERR=/tmp/tmp.KMM4wPyFvJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FlMY7njAnk ++ cat /tmp/tmp.KMM4wPyFvJ ++ rm /tmp/tmp.FlMY7njAnk /tmp/tmp.KMM4wPyFvJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uDPDxECrJQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.1YjBDRHrdq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uDPDxECrJQ ++ cat /tmp/tmp.1YjBDRHrdq ++ rm /tmp/tmp.uDPDxECrJQ /tmp/tmp.1YjBDRHrdq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8Gz5XGeLGK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yyk4NGKIIG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8Gz5XGeLGK ++ cat /tmp/tmp.Yyk4NGKIIG ++ rm /tmp/tmp.8Gz5XGeLGK /tmp/tmp.Yyk4NGKIIG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8lSHcWsqgf +++ mktemp ++ local LAST_ERR=/tmp/tmp.rMW7CBk6A6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8lSHcWsqgf ++ cat /tmp/tmp.rMW7CBk6A6 ++ rm /tmp/tmp.8lSHcWsqgf /tmp/tmp.rMW7CBk6A6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fkhiHEa7K7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.buTEqWKAPr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fkhiHEa7K7 ++ cat /tmp/tmp.buTEqWKAPr ++ rm /tmp/tmp.fkhiHEa7K7 /tmp/tmp.buTEqWKAPr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tU9OkKp6Ol +++ mktemp ++ local LAST_ERR=/tmp/tmp.UymiKN5l7M ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tU9OkKp6Ol ++ cat /tmp/tmp.UymiKN5l7M ++ rm /tmp/tmp.tU9OkKp6Ol /tmp/tmp.UymiKN5l7M ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ckgy8La4IH +++ mktemp ++ local LAST_ERR=/tmp/tmp.nMcFoM0p3b ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ckgy8La4IH ++ cat /tmp/tmp.nMcFoM0p3b ++ rm /tmp/tmp.Ckgy8La4IH /tmp/tmp.nMcFoM0p3b ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VLO5fDdodo +++ mktemp ++ local LAST_ERR=/tmp/tmp.fkcmSDPGXE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VLO5fDdodo ++ cat /tmp/tmp.fkcmSDPGXE ++ rm /tmp/tmp.VLO5fDdodo /tmp/tmp.fkcmSDPGXE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.27PraI773X +++ mktemp ++ local LAST_ERR=/tmp/tmp.UB0hC7yJop ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.27PraI773X ++ cat /tmp/tmp.UB0hC7yJop ++ rm /tmp/tmp.27PraI773X /tmp/tmp.UB0hC7yJop ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JDgAohJZbc +++ mktemp ++ local LAST_ERR=/tmp/tmp.AHQt3XkeH0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JDgAohJZbc ++ cat /tmp/tmp.AHQt3XkeH0 ++ rm /tmp/tmp.JDgAohJZbc /tmp/tmp.AHQt3XkeH0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D24Moh4h3K +++ mktemp ++ local LAST_ERR=/tmp/tmp.Crv024ZZ47 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.D24Moh4h3K ++ cat /tmp/tmp.Crv024ZZ47 ++ rm /tmp/tmp.D24Moh4h3K /tmp/tmp.Crv024ZZ47 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tdYN7MkMZ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.b7Ine1Bbs9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tdYN7MkMZ4 ++ cat /tmp/tmp.b7Ine1Bbs9 ++ rm /tmp/tmp.tdYN7MkMZ4 /tmp/tmp.b7Ine1Bbs9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 15 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DyiSp84gt5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pvDJG7HsQ1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DyiSp84gt5 ++ cat /tmp/tmp.pvDJG7HsQ1 ++ rm /tmp/tmp.DyiSp84gt5 /tmp/tmp.pvDJG7HsQ1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 16 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JJCLKlVAiS +++ mktemp ++ local LAST_ERR=/tmp/tmp.MkmOeTTEB5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JJCLKlVAiS ++ cat /tmp/tmp.MkmOeTTEB5 ++ rm /tmp/tmp.JJCLKlVAiS /tmp/tmp.MkmOeTTEB5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 17 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w5IYX0W30r +++ mktemp ++ local LAST_ERR=/tmp/tmp.SMVlwPTtIw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.w5IYX0W30r ++ cat /tmp/tmp.SMVlwPTtIw ++ rm /tmp/tmp.w5IYX0W30r /tmp/tmp.SMVlwPTtIw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 18 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XZguiWfvuc +++ mktemp ++ local LAST_ERR=/tmp/tmp.F64YEyyTit ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XZguiWfvuc ++ cat /tmp/tmp.F64YEyyTit ++ rm /tmp/tmp.XZguiWfvuc /tmp/tmp.F64YEyyTit ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 19 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AktiftHYKe +++ mktemp ++ local LAST_ERR=/tmp/tmp.I1ZbDBEzeI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AktiftHYKe ++ cat /tmp/tmp.I1ZbDBEzeI ++ rm /tmp/tmp.AktiftHYKe /tmp/tmp.I1ZbDBEzeI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 20 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xng3RFLAyU +++ mktemp ++ local LAST_ERR=/tmp/tmp.mW7QxVpJEJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Xng3RFLAyU ++ cat /tmp/tmp.mW7QxVpJEJ ++ rm /tmp/tmp.Xng3RFLAyU /tmp/tmp.mW7QxVpJEJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 21 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3YScq2XkUz +++ mktemp ++ local LAST_ERR=/tmp/tmp.WhLiZkqskU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3YScq2XkUz ++ cat /tmp/tmp.WhLiZkqskU ++ rm /tmp/tmp.3YScq2XkUz /tmp/tmp.WhLiZkqskU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 22 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LFDIALjabl +++ mktemp ++ local LAST_ERR=/tmp/tmp.mzGxe4HsJ8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LFDIALjabl ++ cat /tmp/tmp.mzGxe4HsJ8 ++ rm /tmp/tmp.LFDIALjabl /tmp/tmp.mzGxe4HsJ8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 23 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8HF5ns2Ch5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QBCYkU6Ndk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8HF5ns2Ch5 ++ cat /tmp/tmp.QBCYkU6Ndk ++ rm /tmp/tmp.8HF5ns2Ch5 /tmp/tmp.QBCYkU6Ndk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 24 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VDVV0qlTEb +++ mktemp ++ local LAST_ERR=/tmp/tmp.GdocQxDUjY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VDVV0qlTEb ++ cat /tmp/tmp.GdocQxDUjY ++ rm /tmp/tmp.VDVV0qlTEb /tmp/tmp.GdocQxDUjY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 25 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SwkyqBQXKl +++ mktemp ++ local LAST_ERR=/tmp/tmp.v7DzSpy3x0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SwkyqBQXKl ++ cat /tmp/tmp.v7DzSpy3x0 ++ rm /tmp/tmp.SwkyqBQXKl /tmp/tmp.v7DzSpy3x0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 26 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QhFvd7hB1J +++ mktemp ++ local LAST_ERR=/tmp/tmp.SvIoss52b9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QhFvd7hB1J ++ cat /tmp/tmp.SvIoss52b9 ++ rm /tmp/tmp.QhFvd7hB1J /tmp/tmp.SvIoss52b9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 27 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.74iNdfvuYR +++ mktemp ++ local LAST_ERR=/tmp/tmp.lN6sHwX9S9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.74iNdfvuYR ++ cat /tmp/tmp.lN6sHwX9S9 ++ rm /tmp/tmp.74iNdfvuYR /tmp/tmp.lN6sHwX9S9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 28 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MEWKtUWxpX +++ mktemp ++ local LAST_ERR=/tmp/tmp.n4S18nMHHS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MEWKtUWxpX ++ cat /tmp/tmp.n4S18nMHHS ++ rm /tmp/tmp.MEWKtUWxpX /tmp/tmp.n4S18nMHHS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 29 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T20qVLkMrt +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kh8lfmeTQB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.T20qVLkMrt ++ cat /tmp/tmp.Kh8lfmeTQB ++ rm /tmp/tmp.T20qVLkMrt /tmp/tmp.Kh8lfmeTQB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 30 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7MDWN792Am +++ mktemp ++ local LAST_ERR=/tmp/tmp.PbfVpZBTK8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7MDWN792Am ++ cat /tmp/tmp.PbfVpZBTK8 ++ rm /tmp/tmp.7MDWN792Am /tmp/tmp.PbfVpZBTK8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 31 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iCKP6jUZTb +++ mktemp ++ local LAST_ERR=/tmp/tmp.eNe1YjpUGk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iCKP6jUZTb ++ cat /tmp/tmp.eNe1YjpUGk ++ rm /tmp/tmp.iCKP6jUZTb /tmp/tmp.eNe1YjpUGk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 32 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nX6nxupBTH +++ mktemp ++ local LAST_ERR=/tmp/tmp.n0nqSSGgsD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nX6nxupBTH ++ cat /tmp/tmp.n0nqSSGgsD ++ rm /tmp/tmp.nX6nxupBTH /tmp/tmp.n0nqSSGgsD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 33 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Pzq3BaS1He +++ mktemp ++ local LAST_ERR=/tmp/tmp.QSb3tAcfr6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Pzq3BaS1He ++ cat /tmp/tmp.QSb3tAcfr6 ++ rm /tmp/tmp.Pzq3BaS1He /tmp/tmp.QSb3tAcfr6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 34 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fma4LxwZki +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tu2UFQZfaZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fma4LxwZki ++ cat /tmp/tmp.Tu2UFQZfaZ ++ rm /tmp/tmp.fma4LxwZki /tmp/tmp.Tu2UFQZfaZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 35 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.10Ge8g5oLv +++ mktemp ++ local LAST_ERR=/tmp/tmp.R1Zg5YKIrb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.10Ge8g5oLv ++ cat /tmp/tmp.R1Zg5YKIrb ++ rm /tmp/tmp.10Ge8g5oLv /tmp/tmp.R1Zg5YKIrb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 36 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5kyIKpMwxj +++ mktemp ++ local LAST_ERR=/tmp/tmp.TFftmZgV2U ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5kyIKpMwxj ++ cat /tmp/tmp.TFftmZgV2U ++ rm /tmp/tmp.5kyIKpMwxj /tmp/tmp.TFftmZgV2U ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 37 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oocNwBi0nE +++ mktemp ++ local LAST_ERR=/tmp/tmp.vjbvGOvsDp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oocNwBi0nE ++ cat /tmp/tmp.vjbvGOvsDp ++ rm /tmp/tmp.oocNwBi0nE /tmp/tmp.vjbvGOvsDp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 38 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M07euUEepu +++ mktemp ++ local LAST_ERR=/tmp/tmp.c5SGsTRDcs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.M07euUEepu ++ cat /tmp/tmp.c5SGsTRDcs ++ rm /tmp/tmp.M07euUEepu /tmp/tmp.c5SGsTRDcs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 39 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z94aXnTikW +++ mktemp ++ local LAST_ERR=/tmp/tmp.ig3M3PBZGd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Z94aXnTikW ++ cat /tmp/tmp.ig3M3PBZGd ++ rm /tmp/tmp.Z94aXnTikW /tmp/tmp.ig3M3PBZGd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 40 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B19oeGtN43 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UBwbR5u7hv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.B19oeGtN43 ++ cat /tmp/tmp.UBwbR5u7hv ++ rm /tmp/tmp.B19oeGtN43 /tmp/tmp.UBwbR5u7hv ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 41 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3RPJlOn62k +++ mktemp ++ local LAST_ERR=/tmp/tmp.YI0xT2xVWD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3RPJlOn62k ++ cat /tmp/tmp.YI0xT2xVWD ++ rm /tmp/tmp.3RPJlOn62k /tmp/tmp.YI0xT2xVWD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 42 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PlgffRhz92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yr7TrgGs3g ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PlgffRhz92 ++ cat /tmp/tmp.Yr7TrgGs3g ++ rm /tmp/tmp.PlgffRhz92 /tmp/tmp.Yr7TrgGs3g ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 43 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DI0xuN5wph +++ mktemp ++ local LAST_ERR=/tmp/tmp.mzqMkIiafZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DI0xuN5wph ++ cat /tmp/tmp.mzqMkIiafZ ++ rm /tmp/tmp.DI0xuN5wph /tmp/tmp.mzqMkIiafZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 44 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6bmKhxmfZG +++ mktemp ++ local LAST_ERR=/tmp/tmp.pAcxauy1dA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6bmKhxmfZG ++ cat /tmp/tmp.pAcxauy1dA ++ rm /tmp/tmp.6bmKhxmfZG /tmp/tmp.pAcxauy1dA ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 45 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ekND2V9uua +++ mktemp ++ local LAST_ERR=/tmp/tmp.kdLKkVQUCi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ekND2V9uua ++ cat /tmp/tmp.kdLKkVQUCi ++ rm /tmp/tmp.ekND2V9uua /tmp/tmp.kdLKkVQUCi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 46 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YGbgqYVWHs +++ mktemp ++ local LAST_ERR=/tmp/tmp.gg5QXkG2Xj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YGbgqYVWHs ++ cat /tmp/tmp.gg5QXkG2Xj ++ rm /tmp/tmp.YGbgqYVWHs /tmp/tmp.gg5QXkG2Xj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 47 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zwJWb9WxN1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YTQIO1gQSo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zwJWb9WxN1 ++ cat /tmp/tmp.YTQIO1gQSo ++ rm /tmp/tmp.zwJWb9WxN1 /tmp/tmp.YTQIO1gQSo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 48 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XUzn1HauLn +++ mktemp ++ local LAST_ERR=/tmp/tmp.BRvT62NzZy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XUzn1HauLn ++ cat /tmp/tmp.BRvT62NzZy ++ rm /tmp/tmp.XUzn1HauLn /tmp/tmp.BRvT62NzZy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 49 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QtjJCmBSoj +++ mktemp ++ local LAST_ERR=/tmp/tmp.hMwoIGTRgR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QtjJCmBSoj ++ cat /tmp/tmp.hMwoIGTRgR ++ rm /tmp/tmp.QtjJCmBSoj /tmp/tmp.hMwoIGTRgR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 50 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IXWncdl1vb +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fn3GAHnzQZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IXWncdl1vb ++ cat /tmp/tmp.Fn3GAHnzQZ ++ rm /tmp/tmp.IXWncdl1vb /tmp/tmp.Fn3GAHnzQZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 51 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s1RDhkK9CS +++ mktemp ++ local LAST_ERR=/tmp/tmp.6OoItapSHd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.s1RDhkK9CS ++ cat /tmp/tmp.6OoItapSHd ++ rm /tmp/tmp.s1RDhkK9CS /tmp/tmp.6OoItapSHd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 52 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sK7HV81Kqs +++ mktemp ++ local LAST_ERR=/tmp/tmp.RM0TEaenNJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sK7HV81Kqs ++ cat /tmp/tmp.RM0TEaenNJ ++ rm /tmp/tmp.sK7HV81Kqs /tmp/tmp.RM0TEaenNJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 53 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NmAn5GN4u1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z7OA1Jtxgj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NmAn5GN4u1 ++ cat /tmp/tmp.Z7OA1Jtxgj ++ rm /tmp/tmp.NmAn5GN4u1 /tmp/tmp.Z7OA1Jtxgj ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nYIxeN9rSo +++ mktemp ++ local LAST_ERR=/tmp/tmp.LpCAxMhJDl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nYIxeN9rSo ++ cat /tmp/tmp.LpCAxMhJDl ++ rm /tmp/tmp.nYIxeN9rSo /tmp/tmp.LpCAxMhJDl ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-proxysql +++ local cluster_name=upgrade-proxysql ++++ get_proxy upgrade-proxysql ++++ local target_cluster=upgrade-proxysql +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.R42mrVoCsJ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.6rXVugFR9r +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.R42mrVoCsJ +++++ cat /tmp/tmp.6rXVugFR9r +++++ rm /tmp/tmp.R42mrVoCsJ /tmp/tmp.6rXVugFR9r +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.g37hTc5iWV ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.xpyeNdJjmD +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.g37hTc5iWV +++++ cat /tmp/tmp.xpyeNdJjmD +++++ rm /tmp/tmp.g37hTc5iWV /tmp/tmp.xpyeNdJjmD +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-proxysql-proxysql ++++ return +++ local cluster_proxy=upgrade-proxysql-proxysql +++ echo proxysql ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1K6nIb9W9J +++ mktemp ++ local LAST_ERR=/tmp/tmp.lvZfJEhSN8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1K6nIb9W9J ++ cat /tmp/tmp.lvZfJEhSN8 ++ rm /tmp/tmp.1K6nIb9W9J /tmp/tmp.lvZfJEhSN8 ++ return 0 + [[ 3 == \3 ]] + echo + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-0 condition met waiting for pod/upgrade-proxysql-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-1 condition met waiting for pod/upgrade-proxysql-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-proxysql-pxc-2 condition met waiting for pod/upgrade-proxysql-pxc-2 to become Ready.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.inpq3pjMaU +++ mktemp ++ local LAST_ERR=/tmp/tmp.qUFE0FD62f ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.inpq3pjMaU ++ cat /tmp/tmp.qUFE0FD62f ++ rm /tmp/tmp.inpq3pjMaU /tmp/tmp.qUFE0FD62f ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V4FzTik5dk +++ mktemp ++ local LAST_ERR=/tmp/tmp.MukPrCoyzT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.V4FzTik5dk ++ cat /tmp/tmp.MukPrCoyzT ++ rm /tmp/tmp.V4FzTik5dk /tmp/tmp.MukPrCoyzT ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.75tvtg7uq4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CsWKIL8a1v ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.75tvtg7uq4 ++ cat /tmp/tmp.CsWKIL8a1v ++ rm /tmp/tmp.75tvtg7uq4 /tmp/tmp.CsWKIL8a1v ++ return 0 + client_pod=pxc-client-7464c4947b-qhvlf + wait_pod pxc-client-7464c4947b-qhvlf + local pod=pxc-client-7464c4947b-qhvlf + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-qhvlf ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-qhvlf condition met waiting for pod/pxc-client-7464c4947b-qhvlf to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Wo8QKb5uOB/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.Wo8QKb5uOB/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IdjUoqCJNC +++ mktemp ++ local LAST_ERR=/tmp/tmp.rvdacrTZNo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IdjUoqCJNC ++ cat /tmp/tmp.rvdacrTZNo ++ rm /tmp/tmp.IdjUoqCJNC /tmp/tmp.rvdacrTZNo ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f == perconalab/percona-xtradb-cluster-operator:PR-2154-0538614f ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i9hYIBaxSh +++ mktemp ++ local LAST_ERR=/tmp/tmp.m41Ltvp9XB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.i9hYIBaxSh ++ cat /tmp/tmp.m41Ltvp9XB ++ rm /tmp/tmp.i9hYIBaxSh /tmp/tmp.m41Ltvp9XB ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-proxysql == perconalab/percona-xtradb-cluster-operator:main-proxysql ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xiuKrIVh5H +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZAUMCUodSs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xiuKrIVh5H ++ cat /tmp/tmp.ZAUMCUodSs ++ rm /tmp/tmp.xiuKrIVh5H /tmp/tmp.ZAUMCUodSs ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-haproxy == perconalab/percona-xtradb-cluster-operator:main-haproxy ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I1IHvIKmWY +++ mktemp ++ local LAST_ERR=/tmp/tmp.WrYlGAPW58 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.I1IHvIKmWY ++ cat /tmp/tmp.WrYlGAPW58 ++ rm /tmp/tmp.I1IHvIKmWY /tmp/tmp.WrYlGAPW58 ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup == perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ffWQLuUBkz +++ mktemp ++ local LAST_ERR=/tmp/tmp.ppJN9ZKPP2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ffWQLuUBkz ++ cat /tmp/tmp.ppJN9ZKPP2 ++ rm /tmp/tmp.ffWQLuUBkz /tmp/tmp.ppJN9ZKPP2 ++ return 0 + [[ perconalab/pmm-client:dev-latest == perconalab/pmm-client:dev-latest ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.logcollector.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P0IZlArqZG +++ mktemp ++ local LAST_ERR=/tmp/tmp.AEj7HMncfV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.logcollector.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.P0IZlArqZG ++ cat /tmp/tmp.AEj7HMncfV ++ rm /tmp/tmp.P0IZlArqZG /tmp/tmp.AEj7HMncfV ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-logcollector == perconalab/percona-xtradb-cluster-operator:main-logcollector ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.alA75HPPwb +++ mktemp ++ local LAST_ERR=/tmp/tmp.EhlJF05NqL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.alA75HPPwb ++ cat /tmp/tmp.EhlJF05NqL ++ rm /tmp/tmp.alA75HPPwb /tmp/tmp.EhlJF05NqL ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ]] + : Cluster images have been updated correctly + compare_generation 2 proxysql upgrade-proxysql + local generation=2 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' + check_generation 2 pxc upgrade-proxysql + local generation=2 + local container=pxc + local cluster=upgrade-proxysql + local current_generation ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m556SpgIYt +++ mktemp ++ local LAST_ERR=/tmp/tmp.ng1TXrOsnZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.m556SpgIYt ++ cat /tmp/tmp.ng1TXrOsnZ ++ rm /tmp/tmp.m556SpgIYt /tmp/tmp.ng1TXrOsnZ ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + for container in '"${containers[@]}"' + check_generation 2 proxysql upgrade-proxysql + local generation=2 + local container=proxysql + local cluster=upgrade-proxysql + local current_generation ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XjOhd4YWjv +++ mktemp ++ local LAST_ERR=/tmp/tmp.08D9sMn31A ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XjOhd4YWjv ++ cat /tmp/tmp.08D9sMn31A ++ rm /tmp/tmp.XjOhd4YWjv /tmp/tmp.08D9sMn31A ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_kubectl statefulset/upgrade-proxysql-pxc + local resource=statefulset/upgrade-proxysql-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc.yml + local new_result=/tmp/tmp.Wo8QKb5uOB/statefulset_upgrade-proxysql-pxc.yml + desc 'compare statefulset/upgrade-proxysql-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/upgrade-proxysql-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-eks.yml ']' + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-80.yml ']' + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-aks.yml ']' + kubectl_bin get -o yaml statefulset/upgrade-proxysql-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-proxysql-18854", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.vyrmjrzc3y ++ mktemp + local LAST_ERR=/tmp/tmp.weQ97rx72m + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/upgrade-proxysql-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vyrmjrzc3y + cat /tmp/tmp.weQ97rx72m + rm /tmp/tmp.vyrmjrzc3y /tmp/tmp.weQ97rx72m + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc.yml /tmp/tmp.Wo8QKb5uOB/statefulset_upgrade-proxysql-pxc.yml + compare_kubectl statefulset/upgrade-proxysql-proxysql + local resource=statefulset/upgrade-proxysql-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql.yml + local new_result=/tmp/tmp.Wo8QKb5uOB/statefulset_upgrade-proxysql-proxysql.yml + desc 'compare statefulset/upgrade-proxysql-proxysql-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/upgrade-proxysql-proxysql- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-eks.yml ']' + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-80.yml ']' + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-aks.yml ']' + kubectl_bin get -o yaml statefulset/upgrade-proxysql-proxysql ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-proxysql-18854", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.MQ3BXtHrzQ ++ mktemp + local LAST_ERR=/tmp/tmp.VdHjwnat1r + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/upgrade-proxysql-proxysql + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MQ3BXtHrzQ + cat /tmp/tmp.VdHjwnat1r + rm /tmp/tmp.MQ3BXtHrzQ /tmp/tmp.VdHjwnat1r + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql.yml /tmp/tmp.Wo8QKb5uOB/statefulset_upgrade-proxysql-proxysql.yml + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + destroy upgrade-proxysql-18854 + local namespace=upgrade-proxysql-18854 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v 'the object has been modified' + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v level=info ++ get_operator_pod + sort -u ++ local label_prefix=app.kubernetes.io/ + tee /tmp/tmp.Wo8QKb5uOB/operator.log +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Epr5YDKl5L +++ mktemp ++ local LAST_ERR=/tmp/tmp.tFsysRxl0W ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Epr5YDKl5L ++ cat /tmp/tmp.tFsysRxl0W ++ rm /tmp/tmp.Epr5YDKl5L /tmp/tmp.tFsysRxl0W ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5fc77b6d5-dbv72 ++ mktemp + local LAST_OUT=/tmp/tmp.tH7hKe1GL4 ++ mktemp + local LAST_ERR=/tmp/tmp.dzyofEWgsq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5fc77b6d5-dbv72 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tH7hKe1GL4 + cat /tmp/tmp.dzyofEWgsq + rm /tmp/tmp.tH7hKe1GL4 /tmp/tmp.dzyofEWgsq + return 0 2025-11-07T01:02:20.845Z INFO setup Manager starting up {"gitCommit": "0538614fbed20de0f0d7c794f79f89e8a46b8543", "gitBranch": "PR-2154-0538614f", "buildTime": "2025-11-06T22:22:40Z", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} 2025-11-07T01:02:20.845Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.13-gke.1139000"} 2025-11-07T01:02:20.849Z INFO setup Registering Components. 2025-11-07T01:02:21.537Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-11-07T01:02:21.538Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-11-07T01:02:21.538Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-11-07T01:02:21.538Z INFO controller-runtime.metrics Starting metrics server 2025-11-07T01:02:21.538Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-11-07T01:02:21.538Z INFO controller-runtime.webhook Starting webhook server 2025-11-07T01:02:21.538Z INFO setup Starting the Cmd. 2025-11-07T01:02:21.538Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-11-07T01:02:21.539Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-11-07T01:02:21.639Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 2025-11-07T01:02:47.859Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-11-07T01:02:47.860Z DEBUG events percona-xtradb-cluster-operator-5fc77b6d5-dbv72_4baff664-4424-4d92-b4c9-27c7b26de4b5 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"0d4392c4-7f9c-40cc-b08d-9d472398bc17","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1762477367853647009"}, "reason": "LeaderElection"} 2025-11-07T01:02:47.860Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-11-07T01:02:47.860Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-11-07T01:02:47.860Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.Secret"} 2025-11-07T01:02:47.860Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-11-07T01:02:47.961Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2025-11-07T01:02:47.961Z INFO Starting Controller {"controller": "pxc-controller"} 2025-11-07T01:02:47.961Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2025-11-07T01:02:47.961Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2025-11-07T01:02:47.961Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2025-11-07T01:02:47.961Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2025-11-07T01:02:48.391Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "69faeeac-94b4-440e-963a-df074c94ec8a", "object": "upgrade-proxysql-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-07T01:02:48.561Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "69faeeac-94b4-440e-963a-df074c94ec8a", "object": "upgrade-proxysql-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-07T01:02:48.605Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "69faeeac-94b4-440e-963a-df074c94ec8a", "object": "upgrade-proxysql-proxysql", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-07T01:02:48.662Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "69faeeac-94b4-440e-963a-df074c94ec8a", "object": "upgrade-proxysql-proxysql", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-07T01:02:49.809Z INFO Creating or updating backup job {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "69faeeac-94b4-440e-963a-df074c94ec8a", "name": "7b9aa-daily-backup", "schedule": "0 0 * * *"} 2025-11-07T01:02:50.491Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "69faeeac-94b4-440e-963a-df074c94ec8a"} 2025-11-07T01:02:57.320Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "7b6b5921-3028-49a7-a1de-da56cc1cb76d"} 2025-11-07T01:03:03.595Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "fe367b48-c6a5-46ba-a29d-08a781dd917d"} 2025-11-07T01:03:06.698Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "object": "upgrade-proxysql-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-07T01:03:06.748Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "object": "upgrade-proxysql-proxysql", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-07T01:03:07.895Z INFO statefulSet was changed, run smart update {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c"} 2025-11-07T01:03:07.901Z INFO apply changes to secondary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2"} 2025-11-07T01:03:07.901Z INFO primary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0.upgrade-proxysql-pxc.upgrade-proxysql-18854.svc.cluster.local"} 2025-11-07T01:03:08.787Z INFO Pod is not updated {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2", "currentRevision": "upgrade-proxysql-pxc-676d696cf9", "targetRevision": "upgrade-proxysql-pxc-54754d658f"} 2025-11-07T01:03:09.358Z ERROR sync users {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "error": "exec syncusers: failed to execute command in pod: unable to upgrade connection: container not found (\"proxysql\") / / ", "errorVerbose": "exec syncusers: failed to execute command in pod: unable to upgrade connection: container not found (\"proxysql\") / / \ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).syncPXCUsersWithProxySQL\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/users.go:966\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:867\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} 2025-11-07T01:03:18.916Z INFO Pod is not updated {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2", "currentRevision": "upgrade-proxysql-pxc-676d696cf9", "targetRevision": "upgrade-proxysql-pxc-54754d658f"} 2025-11-07T01:03:28.878Z INFO Pod is not running {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2", "phase": "Pending"} 2025-11-07T01:03:28.878Z INFO pod is waiting {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2", "reason": "PodInitializing"} 2025-11-07T01:03:58.916Z INFO Pod is updated, running and ready {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2"} 2025-11-07T01:04:18.933Z INFO pod present in hostgroups {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2"} 2025-11-07T01:04:18.937Z INFO pod is online {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-2"} 2025-11-07T01:04:18.938Z INFO apply changes to secondary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1"} 2025-11-07T01:04:19.141Z INFO Pod is not updated {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1", "currentRevision": "upgrade-proxysql-pxc-676d696cf9", "targetRevision": "upgrade-proxysql-pxc-54754d658f"} 2025-11-07T01:04:29.267Z INFO Pod is not updated {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1", "currentRevision": "upgrade-proxysql-pxc-676d696cf9", "targetRevision": "upgrade-proxysql-pxc-54754d658f"} 2025-11-07T01:04:39.234Z INFO Pod is not running {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1", "phase": "Pending"} 2025-11-07T01:04:39.234Z INFO pod is waiting {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1", "reason": "PodInitializing"} 2025-11-07T01:05:39.277Z INFO Pod is updated, running and ready {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1"} 2025-11-07T01:06:59.294Z INFO pod present in hostgroups {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1"} 2025-11-07T01:06:59.298Z INFO apply changes to primary pod {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0"} 2025-11-07T01:06:59.298Z INFO pod is online {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-1"} 2025-11-07T01:06:59.545Z INFO Pod is not updated {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0", "currentRevision": "upgrade-proxysql-pxc-676d696cf9", "targetRevision": "upgrade-proxysql-pxc-54754d658f"} 2025-11-07T01:07:09.782Z INFO Pod is not updated {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0", "currentRevision": "upgrade-proxysql-pxc-676d696cf9", "targetRevision": "upgrade-proxysql-pxc-54754d658f"} 2025-11-07T01:07:19.640Z INFO Pod is not running {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0", "phase": "Pending"} 2025-11-07T01:07:19.640Z INFO pod is waiting {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0", "reason": "PodInitializing"} 2025-11-07T01:07:49.719Z INFO Pod is updated, running and ready {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0"} 2025-11-07T01:08:19.739Z INFO pod present in hostgroups {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0"} 2025-11-07T01:08:19.743Z INFO pod is online {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "pod": "upgrade-proxysql-pxc-0"} 2025-11-07T01:08:19.744Z INFO smart update finished {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c"} 2025-11-07T01:08:19.841Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "b582d4c1-b987-4071-934f-cb32231a792c", "new version": "8.0.43-34.1"} 2025-11-07T01:08:23.002Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "df5caafb-5660-4c98-b477-c86b4aa1a463"} 2025-11-07T01:08:27.973Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "61aefa97-c9fe-4b37-a7a3-63a0aebd60a6"} 2025-11-07T01:08:33.890Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "bff5f2b8-34c0-4076-b710-0c598bf4a4d1"} 2025-11-07T01:08:40.851Z DEBUG PXC users synced with ProxySQL {"controller": "pxc-controller", "namespace": "upgrade-proxysql-18854", "name": "upgrade-proxysql", "reconcileID": "06a3fbe9-80c1-411d-82d0-94edb46885ca"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).resyncPXCUsersWithProxySQL.func1 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:869 + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n upgrade-proxysql-18854 upgrade-proxysql --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/upgrade-proxysql patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.kGjBGo7N4y ++ mktemp + local LAST_ERR=/tmp/tmp.khXX8EIEgi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kGjBGo7N4y perconaxtradbcluster.pxc.percona.com "upgrade-proxysql" deleted from upgrade-proxysql-18854 namespace + cat /tmp/tmp.khXX8EIEgi + rm /tmp/tmp.kGjBGo7N4y /tmp/tmp.khXX8EIEgi + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.wK3MeFrMqw ++ mktemp + local LAST_ERR=/tmp/tmp.bFq3lkYJUc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wK3MeFrMqw No resources found + cat /tmp/tmp.bFq3lkYJUc + rm /tmp/tmp.wK3MeFrMqw /tmp/tmp.bFq3lkYJUc + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.7u3C01mYRd ++ mktemp + local LAST_ERR=/tmp/tmp.DRJYmeyf0T + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7u3C01mYRd No resources found + cat /tmp/tmp.DRJYmeyf0T + rm /tmp/tmp.7u3C01mYRd /tmp/tmp.DRJYmeyf0T + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.BzdQ9PP49m ++ mktemp + local LAST_ERR=/tmp/tmp.DFwkeQIr6I + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BzdQ9PP49m validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.DFwkeQIr6I + rm /tmp/tmp.BzdQ9PP49m /tmp/tmp.DFwkeQIr6I + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace upgrade-proxysql-18854 + rm -rf /tmp/tmp.Wo8QKb5uOB + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.0z6LeWr7Cx + local LAST_OUT=/tmp/tmp.qgCEICdQXh ++ mktemp ++ mktemp ++ grep perconaxtradbcluster + local LAST_ERR=/tmp/tmp.5GCmC9JZmm + local exit_status=0 + local LAST_ERR=/tmp/tmp.Vgl94XH00k + local exit_status=0 ++ kubectl get crd ++ awk '{print $1}' ++ seq 0 2 ++ seq 0 2 ++ tr '\n' ' ' + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + set +e + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator + kubectl delete --grace-period=0 --force=true namespace upgrade-proxysql-18854 + kubectl delete crd perconaxtradbclusterbackups.pxc.percona.com perconaxtradbclusterrestores.pxc.percona.com perconaxtradbclusters.pxc.percona.com customresourcedefinition.apiextensions.k8s.io "perconaxtradbclusterbackups.pxc.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaxtradbclusterrestores.pxc.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaxtradbclusters.pxc.percona.com" deleted + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed -----------------------------------------------------------------------------------