Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/logs/upgrade-haproxy-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + CLUSTER=upgrade-haproxy + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.19.0 + TARGET_IMAGE=perconalab/percona-xtradb-cluster-operator:PR-2002-42929599 + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_PROXY=perconalab/percona-xtradb-cluster-operator:main-proxysql + TARGET_IMAGE_HAPROXY=perconalab/percona-xtradb-cluster-operator:main-haproxy + TARGET_IMAGE_BACKUP=perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup + TARGET_IMAGE_LOGCOLLECTOR=perconalab/percona-xtradb-cluster-operator:main-logcollector + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=8.0 ++ curl -s https://check.percona.com/versions/v1/pxc-operator ++ jq -r '.versions[].operator' ++ tail -n1 ++ sort -V + INIT_OPERATOR_VER=1.18.0 + [[ 1.18.0 == \1\.\1\9\.\0 ]] + GIT_TAG=v1.18.0 ++ curl -s 'https://check.percona.com/versions/v1/pxc-operator/1.18.0/latest?databaseVersion=8.0' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, 
"pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + OPERATOR_NAME=percona-xtradb-cluster-operator ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-xtradb-cluster-operator:1.18.0 ++ echo perconalab/percona-xtradb-cluster-operator:PR-2002-42929599 ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-xtradb-cluster-operator:1.18.0 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", 
"imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.pxc[].imagePath' + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.42-33.1 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, 
"pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ tail -n1 ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM_CLIENT=percona/pmm-client:3.3.1 ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.proxysql[].imagePath' + IMAGE_PROXY=percona/proxysql2:2.7.3 ++ jq -r '.versions[].matrix.haproxy[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, 
"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_HAPROXY=percona/haproxy:2.8.15 ++ jq -r '.versions[].matrix.backup[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, 
"postgis":{}}}]}' + IMAGE_BACKUP=percona/percona-xtrabackup:8.0.35-34.1 ++ jq -r '.versions[].matrix.logCollector[].imagePath' ++ echo '{"versions":[{"product":"pxc-operator", "operator":"1.18.0", "matrix":{"mongod":{}, "pxc":{"8.0.42-33.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.42-33.1", "imageHash":"476851339090e44bb72760ae718fc36beb73a6028a29459e849271649018d546", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1", "imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "imageHashArm64":"", "status":"recommended", "critical":false}, "3.3.1":{"imagePath":"percona/pmm-client:3.3.1", "imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3", "imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567", "imageHashArm64":"", "status":"recommended", "critical":false}}, "haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15", "imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006", "imageHashArm64":"", "status":"recommended", "critical":false}}, "backup":{"8.0.35":{"imagePath":"percona/percona-xtrabackup:8.0.35-34.1", "imageHash":"2dc127b08971051296d421b22aa861bb0330cf702b4b0246ae31053b0f01911e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0", "imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85", "imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356", "status":"recommended", "critical":false}}, "logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1", "imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779", "imageHashArm64":"", "status":"recommended", "critical":false}}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_LOGCOLLECTOR=percona/fluentbit:4.0.1 + [[ -n '' ]] + [[ 1.19.0 == \1\.\1\8\.\0 ]] + main + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.wC6iOOdUKQ ++ mktemp + local LAST_ERR=/tmp/tmp.6XvcY3kpqg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wC6iOOdUKQ namespace/cert-manager created + cat /tmp/tmp.6XvcY3kpqg + rm /tmp/tmp.wC6iOOdUKQ /tmp/tmp.6XvcY3kpqg + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.kPRPKieqrS ++ mktemp + local LAST_ERR=/tmp/tmp.nLmmblmSQJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kPRPKieqrS namespace/cert-manager labeled 
+ cat /tmp/tmp.nLmmblmSQJ + rm /tmp/tmp.kPRPKieqrS /tmp/tmp.nLmmblmSQJ + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.H0M2zOTaEJ ++ mktemp + local LAST_ERR=/tmp/tmp.gUKNGqjbHf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.H0M2zOTaEJ namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created 
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.gUKNGqjbHf Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.H0M2zOTaEJ /tmp/tmp.gUKNGqjbHf + return 0 + '[' '' == 4.10 ']' + sleep 70 + create_infra_gh upgrade-haproxy-12962 v1.18.0 + local ns=upgrade-haproxy-12962 + local git_tag=v1.18.0 + '[' -n pxc-operator ']' + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- ++ mktemp cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.q0CSTBviks + local LAST_OUT=/tmp/tmp.pX7m5WFwX2 ++ mktemp ++ 
mktemp + local LAST_ERR=/tmp/tmp.nyCdyF9mER + local exit_status=0 + local LAST_ERR=/tmp/tmp.OQJjnJUFF7 + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl get ns + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pX7m5WFwX2 + cat /tmp/tmp.OQJjnJUFF7 + rm /tmp/tmp.pX7m5WFwX2 /tmp/tmp.OQJjnJUFF7 + return 0 namespace "cert-manager" deleted namespace "upgrade-haproxy-23744" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.q0CSTBviks namespace "pxc-operator" deleted + cat /tmp/tmp.nyCdyF9mER + rm /tmp/tmp.q0CSTBviks /tmp/tmp.nyCdyF9mER + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jeSo2jZYJ8 ++ mktemp + local LAST_ERR=/tmp/tmp.qJvZhMoggS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jeSo2jZYJ8 namespace/pxc-operator created + cat /tmp/tmp.qJvZhMoggS + rm /tmp/tmp.jeSo2jZYJ8 /tmp/tmp.qJvZhMoggS + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.jvoyDY11RM +++ mktemp ++ local LAST_ERR=/tmp/tmp.mRIJMiuaYu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jvoyDY11RM ++ cat /tmp/tmp.mRIJMiuaYu ++ rm /tmp/tmp.jvoyDY11RM /tmp/tmp.mRIJMiuaYu ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.cvhmLYQYnS ++ mktemp + local LAST_ERR=/tmp/tmp.NxpAshu5NL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cvhmLYQYnS Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2" modified. 
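-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper expanded throughout this trace
-----------------------------------------------------------------------------------
Most of the mktemp/LAST_OUT/LAST_ERR noise in this log is the xtrace expansion of the suite's kubectl_bin wrapper. Inferred from the trace alone, it behaves roughly like the sketch below; the real function lives in the suite's shared helpers, so this reconstruction is an assumption:

# Rough reconstruction of kubectl_bin as seen in the xtrace output.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do            # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 0                    # the trace shows 'sleep 0', i.e. no real backoff
            continue
        fi
        break
    done
    cat "$LAST_OUT"                    # replay captured stdout...
    cat "$LAST_ERR" >&2                # ...and stderr, then clean up
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}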
+ cat /tmp/tmp.NxpAshu5NL + rm /tmp/tmp.cvhmLYQYnS /tmp/tmp.NxpAshu5NL + return 0 + deploy_operator_gh v1.18.0 + local git_tag=v1.18.0 + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- ++ kubectl_bin get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cM70uEhS1h +++ mktemp ++ local LAST_ERR=/tmp/tmp.HitokOohb3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cM70uEhS1h ++ cat /tmp/tmp.HitokOohb3 ++ rm /tmp/tmp.cM70uEhS1h /tmp/tmp.HitokOohb3 ++ return 0 + [[ -n perconaxtradbclusters.pxc.percona.com ]] ++ kubectl_bin get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-18-0")].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nIP0nUmo4l +++ mktemp ++ local LAST_ERR=/tmp/tmp.mPAkyy4xRl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-18-0")].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nIP0nUmo4l ++ cat /tmp/tmp.mPAkyy4xRl ++ rm /tmp/tmp.nIP0nUmo4l /tmp/tmp.mPAkyy4xRl ++ return 0 + [[ -n '' ]] + kubectl_bin apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TojqEPxtL1 ++ mktemp + local LAST_ERR=/tmp/tmp.8pxSChG1UE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TojqEPxtL1 + cat /tmp/tmp.8pxSChG1UE + rm /tmp/tmp.TojqEPxtL1 /tmp/tmp.8pxSChG1UE + return 0 + local rbac_yaml=rbac + local operator_yaml=operator.yaml + '[' -n pxc-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator.yaml + apply_rbac_gh cw-rbac v1.18.0 + local operator_namespace=pxc-operator + local rbac=cw-rbac + local git_tag=v1.18.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cw-rbac.yaml + /usr/bin/sed -i -e 's^namespace: .*^namespace: pxc-operator^' /tmp/tmp.BlMheGOr7P/rbac_v1.18.0.yaml + kubectl_bin apply -f /tmp/tmp.BlMheGOr7P/rbac_v1.18.0.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.AvGlSCXbvd ++ mktemp + local LAST_ERR=/tmp/tmp.4VH9E4v3Iq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.BlMheGOr7P/rbac_v1.18.0.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AvGlSCXbvd clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.4VH9E4v3Iq + rm /tmp/tmp.AvGlSCXbvd /tmp/tmp.4VH9E4v3Iq + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cw-operator.yaml + cat /tmp/tmp.BlMheGOr7P/cw-operator.yaml_v1.18.0.yaml + sed -e 's^image: .*^image: 
perconalab/percona-xtradb-cluster-operator:1.18.0^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n pxc-operator -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' ++ mktemp + local LAST_OUT=/tmp/tmp.AhgdBXNiVw ++ mktemp + local LAST_ERR=/tmp/tmp.AFfzHwTMqI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AhgdBXNiVw deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.AFfzHwTMqI + rm /tmp/tmp.AhgdBXNiVw /tmp/tmp.AFfzHwTMqI + return 0 + sleep 2 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.npQlal3ABM +++ mktemp ++ local LAST_ERR=/tmp/tmp.lKZG5QB6lV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.npQlal3ABM ++ cat /tmp/tmp.lKZG5QB6lV ++ rm /tmp/tmp.npQlal3ABM /tmp/tmp.lKZG5QB6lV ++ return 0 + wait_pod percona-xtradb-cluster-operator-85f65db574-gn84b + local pod=percona-xtradb-cluster-operator-85f65db574-gn84b + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-85f65db574-gn84b ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-85f65db574-gn84b condition met waiting for pod/percona-xtradb-cluster-operator-85f65db574-gn84b to become Ready.Ok + create_namespace upgrade-haproxy-12962 + local namespace=upgrade-haproxy-12962 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-haproxy-12962' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-haproxy-12962 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-haproxy-12962 ++ mktemp + local LAST_OUT=/tmp/tmp.dQD4mibmqt + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.IQg81Sy1bu + local exit_status=0 + local LAST_OUT=/tmp/tmp.3jn8JHAOg5 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-12962 + local LAST_ERR=/tmp/tmp.kWIzuxLpU0 + local exit_status=0 + xargs kubectl delete ns + awk '{print$1}' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-12962 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3jn8JHAOg5 + cat /tmp/tmp.kWIzuxLpU0 + rm /tmp/tmp.3jn8JHAOg5 /tmp/tmp.kWIzuxLpU0 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-12962 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.dQD4mibmqt + cat /tmp/tmp.IQg81Sy1bu Error from server (NotFound): namespaces "upgrade-haproxy-12962" not found + rm /tmp/tmp.dQD4mibmqt /tmp/tmp.IQg81Sy1bu + return 1 + : + wait_for_delete namespace/upgrade-haproxy-12962 + local res=namespace/upgrade-haproxy-12962 + echo -n 'waiting for namespace/upgrade-haproxy-12962 to be deleted' waiting for namespace/upgrade-haproxy-12962 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "upgrade-haproxy-12962" not found + desc 'create namespace upgrade-haproxy-12962' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-haproxy-12962 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-haproxy-12962 ++ mktemp + local LAST_OUT=/tmp/tmp.qh2LvB6HnR ++ mktemp + local LAST_ERR=/tmp/tmp.uRpw4lraGk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-haproxy-12962 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qh2LvB6HnR namespace/upgrade-haproxy-12962 created + cat /tmp/tmp.uRpw4lraGk + rm /tmp/tmp.qh2LvB6HnR /tmp/tmp.uRpw4lraGk + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oiv7trDLX2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WB7qiKCjdd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Oiv7trDLX2 ++ 
cat /tmp/tmp.WB7qiKCjdd ++ rm /tmp/tmp.Oiv7trDLX2 /tmp/tmp.WB7qiKCjdd ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=upgrade-haproxy-12962 ++ mktemp + local LAST_OUT=/tmp/tmp.meOcv9Isnp ++ mktemp + local LAST_ERR=/tmp/tmp.ZYxhtjRDha + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=upgrade-haproxy-12962 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.meOcv9Isnp Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2" modified. + cat /tmp/tmp.ZYxhtjRDha + rm /tmp/tmp.meOcv9Isnp /tmp/tmp.ZYxhtjRDha + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.nREtgPMaCT ++ mktemp + local LAST_ERR=/tmp/tmp.oolLjMGl3b + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nREtgPMaCT secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.oolLjMGl3b + rm /tmp/tmp.nREtgPMaCT /tmp/tmp.oolLjMGl3b + return 0 + start_minio + deploy_helm upgrade-haproxy-12962 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
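-----------------------------------------------------------------------------------
note: the 'no name was specified' errors above are expected
-----------------------------------------------------------------------------------
The repeated 'error: resource(s) were provided, but no name was specified' lines come from destroy_chaos_mesh: each cleanup step greps for chaos-mesh resources, finds none on a fresh cluster, and feeds an empty name list to kubectl delete. The failure is deliberately swallowed with ':'. In sketch form, assuming the helper matches what the trace shows:

# Each destroy_chaos_mesh step follows this shape; with no chaos-mesh
# installed, $names is empty, kubectl delete complains, and ':' turns
# the non-zero exit into a no-op. $names is left unquoted on purpose so
# multiple names word-split into separate arguments.
names=$(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}')
timeout 30 kubectl delete clusterrole $names || :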
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Tue Nov 4 09:58:32 2025 NAMESPACE: upgrade-haproxy-12962 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-haproxy-12962.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-haproxy-12962 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-haproxy-12962 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-haproxy-12962 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-haproxy-12962 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wfJCBqDDXe +++ mktemp ++ local LAST_ERR=/tmp/tmp.t6RoxuAcUf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wfJCBqDDXe ++ cat /tmp/tmp.t6RoxuAcUf ++ rm /tmp/tmp.wfJCBqDDXe /tmp/tmp.t6RoxuAcUf ++ return 0 + MINIO_POD=minio-service-55fcc5d75f-j7vz2 + wait_pod minio-service-55fcc5d75f-j7vz2 + local pod=minio-service-55fcc5d75f-j7vz2 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo minio-service-55fcc5d75f-j7vz2 + local container= + set +o xtrace pod/minio-service-55fcc5d75f-j7vz2 condition met waiting for pod/minio-service-55fcc5d75f-j7vz2 to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.6ak1jUp0WA ++ mktemp + local LAST_ERR=/tmp/tmp.Hg22Dkpy1H + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6ak1jUp0WA make_bucket: operator-testing pod "aws-cli" deleted from upgrade-haproxy-12962 namespace + cat /tmp/tmp.Hg22Dkpy1H + rm /tmp/tmp.6ak1jUp0WA /tmp/tmp.Hg22Dkpy1H + return 0 + local proxy=haproxy + local cr_yaml=/tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + prepare_cr_yaml /tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml haproxy upgrade-haproxy 3 v1.18.0 + local cr_yaml=/tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + local proxy=haproxy + local cluster=upgrade-haproxy + local cluster_size=3 + local git_tag=v1.18.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-haproxy" | .spec.secretsName = "my-cluster-secrets" | .spec.vaultSecretName = "some-name-vault" | .spec.sslSecretName = "some-name-ssl" | .spec.sslInternalSecretName = "some-name-ssl-internal" | .spec.upgradeOptions.apply = "disabled" | .spec.pxc.size = 3 | .spec.proxysql.size = 3 | .spec.haproxy.size = 3 | .spec.pxc.image = "-pxc" | .spec.proxysql.image = "-proxysql" | .spec.haproxy.image = "-haproxy" | .spec.backup.image = "-backup" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service.#namespace:9000/" | .spec.backup.storages.minio.type = "s3" | .spec.pmm.image = "-pmm" ' - + [[ haproxy == \h\a\p\r\o\x\y ]] + yq -i eval ' .spec.haproxy.enabled = true | .spec.proxysql.enabled = false ' /tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + [[ -n '' ]] + spinup_pxc upgrade-haproxy /tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml 3 30 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/secrets_without_tls.yml + local cluster=upgrade-haproxy + local config=/tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + local size=3 + local sleep=30 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.K15fltVQ3V ++ mktemp + local LAST_ERR=/tmp/tmp.IduNQpp39L + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.K15fltVQ3V secret/my-cluster-secrets created + cat /tmp/tmp.IduNQpp39L + rm /tmp/tmp.K15fltVQ3V /tmp/tmp.IduNQpp39L + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + local LAST_OUT=/tmp/tmp.uYplDDkuJl + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:3.3.1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtrabackup:8.0.35-34.1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.18.0#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.15#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: percona/fluentbit:4.0.1#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-haproxy-12962~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.3#' ++ mktemp + local LAST_ERR=/tmp/tmp.IBSsHz8NkM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uYplDDkuJl deployment.apps/pxc-client created + cat /tmp/tmp.IBSsHz8NkM + rm /tmp/tmp.uYplDDkuJl /tmp/tmp.IBSsHz8NkM + return 0 + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + apply_config /tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.18.0#' + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:3.3.1#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.15#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 
s~minio-service.#namespace~minio-service.upgrade-haproxy-12962~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: percona/fluentbit:4.0.1#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.3#' + cat /tmp/tmp.BlMheGOr7P/cr_1.18.0_haproxy.yaml + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.42-33.1#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtrabackup:8.0.35-34.1#' ++ mktemp + local LAST_OUT=/tmp/tmp.ni2GTlubep ++ mktemp + local LAST_ERR=/tmp/tmp.P8Gs1MzGcq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ni2GTlubep perconaxtradbcluster.pxc.percona.com/upgrade-haproxy created + cat /tmp/tmp.P8Gs1MzGcq + rm /tmp/tmp.ni2GTlubep /tmp/tmp.P8Gs1MzGcq + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy upgrade-haproxy ++ local target_cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oYuYOlEkUX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BEAUUVRnU9 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.oYuYOlEkUX +++ cat /tmp/tmp.BEAUUVRnU9 +++ rm /tmp/tmp.oYuYOlEkUX /tmp/tmp.BEAUUVRnU9 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo upgrade-haproxy-haproxy ++ return + local proxy=upgrade-haproxy-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-12962 ++ mktemp + local LAST_OUT=/tmp/tmp.PcUWvfI8og ++ mktemp + local LAST_ERR=/tmp/tmp.MzijFyZlly + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-12962 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-12962 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-12962 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.PcUWvfI8og + cat /tmp/tmp.MzijFyZlly error: no matching resources found + rm /tmp/tmp.PcUWvfI8og /tmp/tmp.MzijFyZlly + return 1 + true + wait_for_running upgrade-haproxy-haproxy 1 + local name=upgrade-haproxy-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod 
upgrade-haproxy-haproxy-0 480 + local pod=upgrade-haproxy-haproxy-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-haproxy-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/upgrade-haproxy-haproxy-0 condition met waiting for pod/upgrade-haproxy-haproxy-0 to become Ready.Ok + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo upgrade-haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo upgrade-haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok + sleep 30 ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.qmdw1wilJm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z3jP4Goh3k ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qmdw1wilJm ++ cat /tmp/tmp.Z3jP4Goh3k ++ rm /tmp/tmp.qmdw1wilJm /tmp/tmp.Z3jP4Goh3k ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dxwF1STqy4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q44CiWJBXk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dxwF1STqy4 ++ cat /tmp/tmp.Q44CiWJBXk ++ rm /tmp/tmp.dxwF1STqy4 /tmp/tmp.Q44CiWJBXk ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.agWbJvKTEI +++ mktemp ++ local LAST_ERR=/tmp/tmp.QJeFI3jonH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.agWbJvKTEI ++ cat /tmp/tmp.QJeFI3jonH ++ rm /tmp/tmp.agWbJvKTEI /tmp/tmp.QJeFI3jonH ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oDhDCLb3v9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dXCbeYZbCL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oDhDCLb3v9 ++ cat 
/tmp/tmp.dXCbeYZbCL ++ rm /tmp/tmp.oDhDCLb3v9 /tmp/tmp.dXCbeYZbCL ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.BlMheGOr7P/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.BlMheGOr7P/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MpOF4B7Apf +++ mktemp ++ local LAST_ERR=/tmp/tmp.yTNfHPG0zl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MpOF4B7Apf ++ cat /tmp/tmp.yTNfHPG0zl ++ rm /tmp/tmp.MpOF4B7Apf /tmp/tmp.yTNfHPG0zl ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.BlMheGOr7P/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.BlMheGOr7P/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AHJCTRglFd +++ mktemp ++ local LAST_ERR=/tmp/tmp.uWw8bmdOhN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.AHJCTRglFd ++ cat /tmp/tmp.uWw8bmdOhN ++ rm /tmp/tmp.AHJCTRglFd /tmp/tmp.uWw8bmdOhN ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.BlMheGOr7P/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.BlMheGOr7P/select-1.sql ++ is_keyring_plugin_in_use upgrade-haproxy ++ local cluster=upgrade-haproxy ++ kubectl_bin exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l2eJsBy7z9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EvygppTHlv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.l2eJsBy7z9 ++ cat /tmp/tmp.EvygppTHlv Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.l2eJsBy7z9 /tmp/tmp.EvygppTHlv ++ return 0 + '[' '' ']' + compare_generation 1 haproxy upgrade-haproxy + local generation=1 + local proxy=haproxy + local cluster=upgrade-haproxy + local current_generation + [[ haproxy == \h\a\p\r\o\x\y ]] + containers=(pxc haproxy) + for container in '"${containers[@]}"' + check_generation 1 pxc upgrade-haproxy + local generation=1 + local container=pxc + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GPxf9Zc4Np +++ mktemp ++ local LAST_ERR=/tmp/tmp.otce63tdaj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.GPxf9Zc4Np ++ cat /tmp/tmp.otce63tdaj ++ rm /tmp/tmp.GPxf9Zc4Np /tmp/tmp.otce63tdaj ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' + check_generation 1 haproxy upgrade-haproxy + local generation=1 + local container=haproxy + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wLT4uqGUPs +++ mktemp ++ local LAST_ERR=/tmp/tmp.HMEsRbcgVD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.wLT4uqGUPs ++ cat /tmp/tmp.HMEsRbcgVD ++ rm /tmp/tmp.wLT4uqGUPs /tmp/tmp.HMEsRbcgVD ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + run_backup upgrade-haproxy on-demand-backup-minio + local cluster=upgrade-haproxy + local backup=on-demand-backup-minio + log 'run pxc-backup/on-demand-backup-minio' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-04T10:05:05+0000]' run pxc-backup/on-demand-backup-minio [2025-11-04T10:05:05+0000] run pxc-backup/on-demand-backup-minio + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/conf/on-demand-backup-minio.yml ++ mktemp + local LAST_OUT=/tmp/tmp.it23zfnHTB ++ mktemp + local LAST_ERR=/tmp/tmp.60uEWiexGW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/conf/on-demand-backup-minio.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.it23zfnHTB perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-minio created + cat /tmp/tmp.60uEWiexGW + rm /tmp/tmp.it23zfnHTB 
/tmp/tmp.60uEWiexGW + return 0 + wait_backup on-demand-backup-minio + local backup=on-demand-backup-minio + local status=Succeeded + set +o xtrace waiting for pxc-backup/on-demand-backup-minio to reach Succeeded state..................Succeeded + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.8yfOjzLjTi ++ mktemp + local LAST_ERR=/tmp/tmp.ipyBNJ3pNc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8yfOjzLjTi customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.ipyBNJ3pNc + rm /tmp/tmp.8yfOjzLjTi /tmp/tmp.ipyBNJ3pNc + return 0 + [[ -n pxc-operator ]] + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.4b06d7Bu8e ++ mktemp + local LAST_ERR=/tmp/tmp.bhU6W8Akhr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4b06d7Bu8e clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.bhU6W8Akhr + rm /tmp/tmp.4b06d7Bu8e /tmp/tmp.bhU6W8Akhr + return 0 + kubectl_bin patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2002-42929599"}]}}}}' -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jzoFX3pnP2 ++ mktemp + local LAST_ERR=/tmp/tmp.pzr4qOSeeI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2002-42929599"}]}}}}' -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jzoFX3pnP2 deployment.apps/percona-xtradb-cluster-operator patched + cat /tmp/tmp.pzr4qOSeeI + rm /tmp/tmp.jzoFX3pnP2 /tmp/tmp.pzr4qOSeeI + return 0 + kubectl_bin rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sjvGbisskg ++ mktemp + local LAST_ERR=/tmp/tmp.mq8B32qXRe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.sjvGbisskg Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 of 1 updated 
replicas are available... deployment "percona-xtradb-cluster-operator" successfully rolled out + cat /tmp/tmp.mq8B32qXRe + rm /tmp/tmp.sjvGbisskg /tmp/tmp.mq8B32qXRe + return 0 + sleep 10 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- + local i=0 + local max=60 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3nOCcQoAKn +++ mktemp ++ local LAST_ERR=/tmp/tmp.bWzusjYh6p ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3nOCcQoAKn ++ cat /tmp/tmp.bWzusjYh6p ++ rm /tmp/tmp.3nOCcQoAKn /tmp/tmp.bWzusjYh6p ++ return 0 + [[ 1 -eq 1 ]] + '[' -n pxc-operator ']' ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.3XJvpwlFUD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mzlkl4I7Wx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3XJvpwlFUD ++ cat /tmp/tmp.Mzlkl4I7Wx ++ rm /tmp/tmp.3XJvpwlFUD /tmp/tmp.Mzlkl4I7Wx ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.9YnCRGYkHp ++ mktemp + local LAST_ERR=/tmp/tmp.uYP634waZo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9YnCRGYkHp Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2" modified. 
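The pattern visible throughout this log (mktemp, LAST_OUT/LAST_ERR temp files, seq 0 2, set +e/set -e, cat, rm, return) is the harness's kubectl_bin retry wrapper. A minimal sketch consistent with the trace follows; it is an approximation, not the harness source, and the real helper in e2e-tests/functions may differ in details:

    # kubectl_bin: retry wrapper reconstructed from this trace; run kubectl up
    # to three times, capturing stdout/stderr to temp files, and echo both.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0    # the log shows "sleep 0", i.e. no real back-off
                continue
            fi
            break
        done
        cat "$LAST_OUT"    # surface captured stdout to the caller
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

Callers such as run_mysql and compare_mysql_cmd depend on the captured stdout, e.g. the query output that is later diffed against compare/select-1.sql.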
+ cat /tmp/tmp.uYP634waZo + rm /tmp/tmp.9YnCRGYkHp /tmp/tmp.uYP634waZo + return 0 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep perconalab/percona-xtradb-cluster-operator:PR-2002-42929599 ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N3CDC7KPT9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iOJZlcmk1H ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.N3CDC7KPT9 ++ cat /tmp/tmp.iOJZlcmk1H ++ rm /tmp/tmp.N3CDC7KPT9 /tmp/tmp.iOJZlcmk1H ++ return 0 + wait_pod percona-xtradb-cluster-operator-56756b55c9-hjskb + local pod=percona-xtradb-cluster-operator-56756b55c9-hjskb + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo percona-xtradb-cluster-operator-56756b55c9-hjskb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-56756b55c9-hjskb condition met waiting for pod/percona-xtradb-cluster-operator-56756b55c9-hjskb to become Ready.Ok ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.97u89TM13l +++ mktemp ++ local LAST_ERR=/tmp/tmp.DuquPI0B5G ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.97u89TM13l ++ cat /tmp/tmp.DuquPI0B5G ++ rm /tmp/tmp.97u89TM13l /tmp/tmp.DuquPI0B5G ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=upgrade-haproxy-12962 ++ mktemp + local LAST_OUT=/tmp/tmp.8nF0YLM2dA ++ mktemp + local LAST_ERR=/tmp/tmp.g9GvqMO2Q9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2 --namespace=upgrade-haproxy-12962 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8nF0YLM2dA Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2002-42929599-9-cluster2" modified. 
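Stripped of the wrapper noise, the operator upgrade that just completed reduces to four kubectl operations (workspace paths shortened here; image tag exactly as logged):

    # 1. Update the CRDs from the PR workspace (server-side apply, forcing conflicts)
    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
    # 2. Re-apply cluster-wide RBAC, rewriting "namespace:" to pxc-operator
    sed -e 's^namespace: .*^namespace: pxc-operator^' deploy/cw-rbac.yaml | kubectl apply -f -
    # 3. Point the operator Deployment at the PR image under test
    kubectl -n pxc-operator patch deployment percona-xtradb-cluster-operator \
        -p '{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2002-42929599"}]}}}}'
    # 4. Block until the rollout finishes
    kubectl -n pxc-operator rollout status deployment/percona-xtradb-cluster-operator

The kubecontext flips around this step (to pxc-operator, then back to upgrade-haproxy-12962) exist because the operator is deployed cluster-wide in its own namespace while the cluster under test lives in upgrade-haproxy-12962.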
+ cat /tmp/tmp.g9GvqMO2Q9 + rm /tmp/tmp.8nF0YLM2dA /tmp/tmp.g9GvqMO2Q9 + return 0 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-haproxy 3 + local cluster=upgrade-haproxy + local cluster_size=3 + wait_cluster_consistency upgrade-haproxy 3 + local cluster_name=upgrade-haproxy + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-haproxy ++ local cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GwvZOCUti0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IbnQLCN3a9 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.GwvZOCUti0 +++ cat /tmp/tmp.IbnQLCN3a9 +++ rm /tmp/tmp.GwvZOCUti0 /tmp/tmp.IbnQLCN3a9 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yClDfGBgBV +++ mktemp ++ local LAST_ERR=/tmp/tmp.VJQrtA8O3w ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.yClDfGBgBV ++ cat /tmp/tmp.VJQrtA8O3w ++ rm /tmp/tmp.yClDfGBgBV /tmp/tmp.VJQrtA8O3w ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/upgrade-haproxy to be ready' waiting for pxc/upgrade-haproxy to be ready++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0roSSwAbS4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.z1nif2tPrR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0roSSwAbS4 ++ cat /tmp/tmp.z1nif2tPrR ++ rm /tmp/tmp.0roSSwAbS4 /tmp/tmp.z1nif2tPrR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FHnrsJ1NLI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ywWdUbXPhS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FHnrsJ1NLI ++ cat /tmp/tmp.ywWdUbXPhS ++ rm /tmp/tmp.FHnrsJ1NLI /tmp/tmp.ywWdUbXPhS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
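The near-identical blocks around this point are iterations of the harness's cluster-consistency poll: .status.state is re-read every 5 seconds, up to 300 attempts, before the member counts are verified. A sketch of the assumed helper shape, reconstructed from the trace:

    # Assumed shape of wait_cluster_consistency, per the trace: poll the pxc
    # resource until .status.state is "ready", then verify member counts.
    wait_cluster_consistency() {
        local cluster_name=$1 cluster_size=$2 proxy_size=$3
        local i=0 max=300
        sleep 7
        echo -n "waiting for pxc/${cluster_name} to be ready"
        until [[ $(kubectl get pxc "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
            echo -n .
            sleep 5
            [[ $i -ge $max ]] && return 1    # give up after ~25 minutes
            let i+=1
        done
        [[ $(kubectl get pxc "$cluster_name" -o 'jsonpath={.status.pxc.ready}') == "$cluster_size" ]] &&
        [[ $(kubectl get pxc "$cluster_name" -o 'jsonpath={.status.haproxy.ready}') == "$proxy_size" ]]
    }

In this run the loop exits at iteration 23, i.e. the cluster took roughly two minutes to report ready again after the operator restart.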
.+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8D7UNJ7rzv +++ mktemp ++ local LAST_ERR=/tmp/tmp.EQ4qzwMQha ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8D7UNJ7rzv ++ cat /tmp/tmp.EQ4qzwMQha ++ rm /tmp/tmp.8D7UNJ7rzv /tmp/tmp.EQ4qzwMQha ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lKrCK82eba +++ mktemp ++ local LAST_ERR=/tmp/tmp.87OZJRm9yX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.lKrCK82eba ++ cat /tmp/tmp.87OZJRm9yX ++ rm /tmp/tmp.lKrCK82eba /tmp/tmp.87OZJRm9yX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.82EkY8dcIl +++ mktemp ++ local LAST_ERR=/tmp/tmp.KQBaNSqiUx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.82EkY8dcIl ++ cat /tmp/tmp.KQBaNSqiUx ++ rm /tmp/tmp.82EkY8dcIl /tmp/tmp.KQBaNSqiUx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iat7rQZFZ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xsTceM8CWB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iat7rQZFZ4 ++ cat /tmp/tmp.xsTceM8CWB ++ rm /tmp/tmp.iat7rQZFZ4 /tmp/tmp.xsTceM8CWB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xi5YUKBDMS +++ mktemp ++ local LAST_ERR=/tmp/tmp.bk5CQkzsEK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xi5YUKBDMS ++ cat /tmp/tmp.bk5CQkzsEK ++ rm /tmp/tmp.xi5YUKBDMS /tmp/tmp.bk5CQkzsEK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ExHFt6JANv +++ mktemp ++ local LAST_ERR=/tmp/tmp.7mWsNWdRr3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ExHFt6JANv ++ cat /tmp/tmp.7mWsNWdRr3 ++ rm /tmp/tmp.ExHFt6JANv /tmp/tmp.7mWsNWdRr3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uRDE3JYNsG +++ mktemp ++ local LAST_ERR=/tmp/tmp.x2rTUTDZau ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uRDE3JYNsG ++ cat /tmp/tmp.x2rTUTDZau ++ rm /tmp/tmp.uRDE3JYNsG /tmp/tmp.x2rTUTDZau ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ANNocadkbV +++ mktemp ++ local LAST_ERR=/tmp/tmp.WngmTgLuzu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ANNocadkbV ++ cat /tmp/tmp.WngmTgLuzu ++ rm /tmp/tmp.ANNocadkbV /tmp/tmp.WngmTgLuzu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NJafyLQU75 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2xmUrVfmZU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NJafyLQU75 ++ cat /tmp/tmp.2xmUrVfmZU ++ rm /tmp/tmp.NJafyLQU75 /tmp/tmp.2xmUrVfmZU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vY4CuR0NYg +++ mktemp ++ local LAST_ERR=/tmp/tmp.BR0PktsbFy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vY4CuR0NYg ++ cat /tmp/tmp.BR0PktsbFy ++ rm /tmp/tmp.vY4CuR0NYg /tmp/tmp.BR0PktsbFy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Iy49eWcVL +++ mktemp ++ local LAST_ERR=/tmp/tmp.DhNxlNjhYm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0Iy49eWcVL ++ cat /tmp/tmp.DhNxlNjhYm ++ rm /tmp/tmp.0Iy49eWcVL /tmp/tmp.DhNxlNjhYm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XDhgH3iJBW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xjn9iqfAFk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XDhgH3iJBW ++ cat /tmp/tmp.Xjn9iqfAFk ++ rm /tmp/tmp.XDhgH3iJBW /tmp/tmp.Xjn9iqfAFk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yJNJFrDGIs +++ mktemp ++ local LAST_ERR=/tmp/tmp.LK1cyoFXzT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.yJNJFrDGIs ++ cat /tmp/tmp.LK1cyoFXzT ++ rm /tmp/tmp.yJNJFrDGIs /tmp/tmp.LK1cyoFXzT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZSLlYaEh4z +++ mktemp ++ local LAST_ERR=/tmp/tmp.SP2RVMHIQy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZSLlYaEh4z ++ cat /tmp/tmp.SP2RVMHIQy ++ rm /tmp/tmp.ZSLlYaEh4z /tmp/tmp.SP2RVMHIQy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 15 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v4jcvsGtNi +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZoKo0DgPYa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.v4jcvsGtNi ++ cat /tmp/tmp.ZoKo0DgPYa ++ rm /tmp/tmp.v4jcvsGtNi /tmp/tmp.ZoKo0DgPYa ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 16 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UOTQwvwTIZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.sqb45ADptl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UOTQwvwTIZ ++ cat /tmp/tmp.sqb45ADptl ++ rm /tmp/tmp.UOTQwvwTIZ /tmp/tmp.sqb45ADptl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 17 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4nGvf0VdsG +++ mktemp ++ local LAST_ERR=/tmp/tmp.L20dNQI4ET ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4nGvf0VdsG ++ cat /tmp/tmp.L20dNQI4ET ++ rm /tmp/tmp.4nGvf0VdsG /tmp/tmp.L20dNQI4ET ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 18 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.83AZUNpClC +++ mktemp ++ local LAST_ERR=/tmp/tmp.xGxjClJwCi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.83AZUNpClC ++ cat /tmp/tmp.xGxjClJwCi ++ rm /tmp/tmp.83AZUNpClC /tmp/tmp.xGxjClJwCi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 19 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nXNLP4moLS +++ mktemp ++ local LAST_ERR=/tmp/tmp.lBc9BoXVD9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nXNLP4moLS ++ cat /tmp/tmp.lBc9BoXVD9 ++ rm /tmp/tmp.nXNLP4moLS /tmp/tmp.lBc9BoXVD9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 20 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cXvn1wcLM9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Mh2bjgfPh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cXvn1wcLM9 ++ cat /tmp/tmp.3Mh2bjgfPh ++ rm /tmp/tmp.cXvn1wcLM9 /tmp/tmp.3Mh2bjgfPh ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 21 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OTuK75w276 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qolopgPO0E ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OTuK75w276 ++ cat /tmp/tmp.qolopgPO0E ++ rm /tmp/tmp.OTuK75w276 /tmp/tmp.qolopgPO0E ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 22 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IwikVmsmDT +++ mktemp ++ local LAST_ERR=/tmp/tmp.xP1kDup2ty ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IwikVmsmDT ++ cat /tmp/tmp.xP1kDup2ty ++ rm /tmp/tmp.IwikVmsmDT /tmp/tmp.xP1kDup2ty ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 23 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5QSNMETUfI +++ mktemp ++ local LAST_ERR=/tmp/tmp.KFQ7OtYqNQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.5QSNMETUfI ++ cat /tmp/tmp.KFQ7OtYqNQ ++ rm /tmp/tmp.5QSNMETUfI /tmp/tmp.KFQ7OtYqNQ ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LN7z43Hi3n +++ mktemp ++ local LAST_ERR=/tmp/tmp.GlfT5rUr1T ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LN7z43Hi3n ++ cat /tmp/tmp.GlfT5rUr1T ++ rm /tmp/tmp.LN7z43Hi3n /tmp/tmp.GlfT5rUr1T ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-haproxy +++ local cluster_name=upgrade-haproxy ++++ get_proxy upgrade-haproxy ++++ local target_cluster=upgrade-haproxy +++++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.TeG9J8HidO ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.nYNHbmN1On +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.TeG9J8HidO +++++ cat /tmp/tmp.nYNHbmN1On +++++ rm /tmp/tmp.TeG9J8HidO /tmp/tmp.nYNHbmN1On +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-haproxy-haproxy ++++ return +++ local cluster_proxy=upgrade-haproxy-haproxy +++ echo haproxy ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z9vUhWwz1M +++ mktemp ++ local LAST_ERR=/tmp/tmp.xkZqJZeXvj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Z9vUhWwz1M ++ cat /tmp/tmp.xkZqJZeXvj ++ rm /tmp/tmp.Z9vUhWwz1M /tmp/tmp.xkZqJZeXvj ++ return 0 + [[ 3 == \3 ]] + echo + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod 
upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TDiw3X0g6N +++ mktemp ++ local LAST_ERR=/tmp/tmp.XdMxXQZ5mI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.TDiw3X0g6N ++ cat /tmp/tmp.XdMxXQZ5mI ++ rm /tmp/tmp.TDiw3X0g6N /tmp/tmp.XdMxXQZ5mI ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.BlMheGOr7P/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.BlMheGOr7P/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1Wiwehnx7C +++ mktemp ++ local LAST_ERR=/tmp/tmp.FBfDrC4yCJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1Wiwehnx7C ++ cat /tmp/tmp.FBfDrC4yCJ ++ rm /tmp/tmp.1Wiwehnx7C /tmp/tmp.FBfDrC4yCJ ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.BlMheGOr7P/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.BlMheGOr7P/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.42-33.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZNzUt2EQK1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.e694Jvmqda ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZNzUt2EQK1 ++ cat /tmp/tmp.e694Jvmqda ++ rm /tmp/tmp.ZNzUt2EQK1 /tmp/tmp.e694Jvmqda ++ return 0 + client_pod=pxc-client-7464c4947b-rzlmm + wait_pod pxc-client-7464c4947b-rzlmm + local pod=pxc-client-7464c4947b-rzlmm + local max_retry=480 + local ns= ++ echo pxc-client-7464c4947b-rzlmm ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-7464c4947b-rzlmm condition met waiting for pod/pxc-client-7464c4947b-rzlmm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.BlMheGOr7P/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2002/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.BlMheGOr7P/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e3lectS7b1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.K2OUgJTUvY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.e3lectS7b1 ++ cat /tmp/tmp.K2OUgJTUvY ++ rm /tmp/tmp.e3lectS7b1 /tmp/tmp.K2OUgJTUvY ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-2002-42929599 == perconalab/percona-xtradb-cluster-operator:PR-2002-42929599 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VGE1wYvMXC +++ mktemp ++ local LAST_ERR=/tmp/tmp.VdYTB9AS4n ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VGE1wYvMXC ++ cat /tmp/tmp.VdYTB9AS4n ++ rm /tmp/tmp.VGE1wYvMXC /tmp/tmp.VdYTB9AS4n ++ return 0 + [[ percona/proxysql2:2.7.3 == percona/proxysql2:2.7.3 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x09OkVGMdw +++ mktemp ++ local LAST_ERR=/tmp/tmp.BB3C5FSah0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.x09OkVGMdw ++ cat /tmp/tmp.BB3C5FSah0 ++ rm /tmp/tmp.x09OkVGMdw /tmp/tmp.BB3C5FSah0 ++ return 0 + [[ percona/haproxy:2.8.15 == percona/haproxy:2.8.15 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9O4ibyYeIn +++ mktemp ++ local LAST_ERR=/tmp/tmp.ItKtE6X6iH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9O4ibyYeIn ++ cat /tmp/tmp.ItKtE6X6iH ++ rm /tmp/tmp.9O4ibyYeIn /tmp/tmp.ItKtE6X6iH ++ return 0 + [[ percona/percona-xtrabackup:8.0.35-34.1 == percona/percona-xtrabackup:8.0.35-34.1 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JztZuJWQ4G +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bqz9g9Ifnf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JztZuJWQ4G ++ cat /tmp/tmp.Bqz9g9Ifnf ++ rm /tmp/tmp.JztZuJWQ4G /tmp/tmp.Bqz9g9Ifnf ++ return 0 + [[ percona/pmm-client:3.3.1 == percona/pmm-client:3.3.1 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.logcollector.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gmZzKvOkOQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YlymHSyf7y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.logcollector.image}' 
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.gmZzKvOkOQ
++ cat /tmp/tmp.YlymHSyf7y
++ rm /tmp/tmp.gmZzKvOkOQ /tmp/tmp.YlymHSyf7y
++ return 0
+ [[ percona/fluentbit:4.0.1 == percona/fluentbit:4.0.1 ]]
++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Llo7F9Wl2Q
+++ mktemp
++ local LAST_ERR=/tmp/tmp.JDqZZWVmMk
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.Llo7F9Wl2Q
++ cat /tmp/tmp.JDqZZWVmMk
++ rm /tmp/tmp.Llo7F9Wl2Q /tmp/tmp.JDqZZWVmMk
++ return 0
+ [[ percona/percona-xtradb-cluster:8.0.42-33.1 == percona/percona-xtradb-cluster:8.0.42-33.1 ]]
+ : Operator image has been updated correctly
+ compare_generation 1 haproxy upgrade-haproxy
+ local generation=1
+ local proxy=haproxy
+ local cluster=upgrade-haproxy
+ local current_generation
+ [[ haproxy == \h\a\p\r\o\x\y ]]
+ containers=(pxc haproxy)
+ for container in '"${containers[@]}"'
+ check_generation 1 pxc upgrade-haproxy
+ local generation=1
+ local container=pxc
+ local cluster=upgrade-haproxy
+ local current_generation
++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.nfbjUItjRJ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KhpsJQfLJU
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.nfbjUItjRJ
++ cat /tmp/tmp.KhpsJQfLJU
++ rm /tmp/tmp.nfbjUItjRJ /tmp/tmp.KhpsJQfLJU
++ return 0
+ current_generation=2
+ [[ 1 != \2 ]]
+ echo 'Generation for resource pxc is: 2, but should be: 1'
Generation for resource pxc is: 2, but should be: 1
+ exit 1
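The run fails here: after the operator upgrade, metadata.generation of the upgrade-haproxy-pxc StatefulSet is 2 instead of the expected 1, i.e. the new operator has evidently rewritten the StatefulSet spec before any image change, and this stage of the test treats that as an error. The check, reconstructed from the trace (simplified; the real compare_generation also branches on the proxy type):

    # compare_generation/check_generation as seen in the trace: the generation
    # of each StatefulSet must still match the expected value.
    check_generation() {
        local generation=$1 container=$2 cluster=$3
        local current_generation
        current_generation=$(kubectl get statefulset "${cluster}-${container}" \
            -o 'jsonpath={.metadata.generation}')
        if [[ ${generation} != "${current_generation}" ]]; then
            echo "Generation for resource ${container} is: ${current_generation}, but should be: ${generation}"
            exit 1
        fi
    }

    compare_generation() {
        local generation=$1 proxy=$2 cluster=$3
        local containers=(pxc "$proxy")
        for container in "${containers[@]}"; do
            check_generation "$generation" "$container" "$cluster"
        done
    }

Because check_generation calls exit 1, the whole test script aborts; that is why the log ends at this point.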