Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/logs/upgrade-haproxy-5-7.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + CLUSTER=upgrade-haproxy + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.19.0 + TARGET_IMAGE=perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc5.7 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_PROXY=perconalab/percona-xtradb-cluster-operator:main-proxysql + TARGET_IMAGE_HAPROXY=perconalab/percona-xtradb-cluster-operator:main-haproxy + TARGET_IMAGE_BACKUP=perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup + TARGET_IMAGE_LOGCOLLECTOR=perconalab/percona-xtradb-cluster-operator:main-logcollector + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc5.7 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=5.7 ++ curl -s https://check.percona.com/versions/v1/pxc-operator ++ jq -r '.versions[].operator' ++ tail -n1 ++ sort -V + INIT_OPERATOR_VER=1.18.0 + [[ 1.18.0 == \1\.\1\9\.\0 ]] + GIT_TAG=v1.18.0 ++ curl -s 'https://check.percona.com/versions/v1/pxc-operator/1.18.0/latest?databaseVersion=5.7' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psO
perator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + OPERATOR_NAME=percona-xtradb-cluster-operator ++ echo '{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-xtradb-cluster-operator:1.18.0 ++ echo perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-xtradb-cluster-operator:1.18.0 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.pxc[].imagePath' + IMAGE_PXC=percona/percona-xtradb-cluster:5.7.44-31.65 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' ++ tail -n1 + IMAGE_PMM_CLIENT=percona/pmm-client:3.3.1 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.proxysql[].imagePath' + IMAGE_PROXY=percona/proxysql2:2.7.3 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.haproxy[].imagePath' + IMAGE_HAPROXY=percona/haproxy:2.8.15 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.backup[].imagePath' + IMAGE_BACKUP=percona/percona-xtrabackup:2.4.29 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.18.0","matrix":{"mongod":{},"pxc":{"5.7.44-31.65":{"imagePath":"percona/percona-xtradb-cluster:5.7.44-31.65","imageHash":"36fafdef46485839d4ff7c6dc73b4542b07031644c0152e911acb9734ff2be85","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"recommended","critical":false},"3.3.1":{"imagePath":"percona/pmm-client:3.3.1","imageHash":"29a9bb1c69fef8bedc4d4a9ed0ae8224a8623fd3eb8676ef40b13fd044188cb4","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.3":{"imagePath":"percona/proxysql2:2.7.3","imageHash":"51fedf9de05e4f130d5b08388511536fb1e1050a24ffc21bedb0f0b61a236567","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.15":{"imagePath":"percona/haproxy:2.8.15","imageHash":"49e6987a1c8b27e9111ae1f1168dd51f2840eb6d939ffc157358f0f259819006","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"2.4.29":{"imagePath":"percona/percona-xtrabackup:2.4.29","imageHash":"11b92a7f7362379fc6b0de92382706153f2ac007ebf0d7ca25bac2c7303fdf10","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.18.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.18.0","imageHash":"0eca0b096482c7d09792c15fee00dbdcd0fbf3cd487dab60eb2774b025681e85","imageHashArm64":"bdb7a0ff6b78e98b16f8b521e91682202b6d404202283b34b8168013d5c06356","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.logCollector[].imagePath' + IMAGE_LOGCOLLECTOR=percona/fluentbit:4.0.1 + [[ -n '' ]] + [[ 1.19.0 == \1\.\1\8\.\0 ]] + main + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.hqVhbgVfxi ++ mktemp + local LAST_ERR=/tmp/tmp.PotAtCugYf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hqVhbgVfxi namespace/cert-manager created + cat /tmp/tmp.PotAtCugYf + rm /tmp/tmp.hqVhbgVfxi /tmp/tmp.PotAtCugYf + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.cp4O5HWchh ++ mktemp + local LAST_ERR=/tmp/tmp.zsl5zl3fOl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cp4O5HWchh namespace/cert-manager labeled + cat /tmp/tmp.zsl5zl3fOl + rm /tmp/tmp.cp4O5HWchh /tmp/tmp.zsl5zl3fOl + return 0 + kubectl_bin apply -f 
https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.2jJ5ZUQjs9 ++ mktemp + local LAST_ERR=/tmp/tmp.GbznTjuX5w + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2jJ5ZUQjs9 namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged 
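The cert-manager bootstrap traced here comes down to a few kubectl calls. A minimal sketch, reconstructed from the surrounding trace with the kubectl_bin retry/tmp-file wrapper stripped (the comment on the sleep is an interpretation, not from the log):

  # deploy_cert_manager, condensed from the trace (wrapper plumbing omitted)
  kubectl create namespace cert-manager
  kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
  kubectl apply --validate=false \
      -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml
  sleep 70   # presumably gives the cert-manager webhooks time to become ready
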
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.GbznTjuX5w Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.2jJ5ZUQjs9 /tmp/tmp.GbznTjuX5w + return 0 + '[' '' == 4.10 ']' + sleep 70 + create_infra_gh upgrade-haproxy-25431 v1.18.0 + local ns=upgrade-haproxy-25431 + local git_tag=v1.18.0 + '[' -n pxc-operator ']' + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.U1zEhkfmVe ++ mktemp + kubectl_bin get ns ++ mktemp + local 
LAST_ERR=/tmp/tmp.lYJPK70wgA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + local LAST_OUT=/tmp/tmp.RPOG0C8d1f ++ mktemp + local LAST_ERR=/tmp/tmp.LKJhZxuK0O + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RPOG0C8d1f + cat /tmp/tmp.LKJhZxuK0O + rm /tmp/tmp.RPOG0C8d1f /tmp/tmp.LKJhZxuK0O + return 0 namespace "cert-manager" deleted namespace "upgrade-haproxy-30573" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.U1zEhkfmVe namespace "pxc-operator" deleted + cat /tmp/tmp.lYJPK70wgA + rm /tmp/tmp.U1zEhkfmVe /tmp/tmp.lYJPK70wgA + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.hZiCQ3V8XP ++ mktemp + local LAST_ERR=/tmp/tmp.PKBZJldarE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hZiCQ3V8XP namespace/pxc-operator created + cat /tmp/tmp.PKBZJldarE + rm /tmp/tmp.hZiCQ3V8XP /tmp/tmp.PKBZJldarE + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.udnxpL4Z83 +++ mktemp ++ local LAST_ERR=/tmp/tmp.e5quiSsPTU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.udnxpL4Z83 ++ cat /tmp/tmp.e5quiSsPTU ++ rm /tmp/tmp.udnxpL4Z83 /tmp/tmp.e5quiSsPTU ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.IHGqKRAfMR ++ mktemp + local LAST_ERR=/tmp/tmp.Fw2XkYIzhF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IHGqKRAfMR Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1" modified. 
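Next the trace installs the previous (1.18.0) operator from the v1.18.0 GitHub tag via deploy_operator_gh. The shell below is a rough equivalent condensed from the commands that follow; tag and ns are shorthand variables introduced here, and the final kubectl wait is only an approximation of the harness's own wait_pod loop:

  # deploy_operator_gh v1.18.0, condensed (tmp files and retry wrapper omitted)
  tag=v1.18.0
  ns=pxc-operator
  # CRDs from the release tag, applied server-side
  kubectl apply --server-side --force-conflicts \
      -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${tag}/deploy/crd.yaml
  # cluster-wide RBAC, rewritten to the operator namespace
  curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${tag}/deploy/cw-rbac.yaml \
      | sed -e "s^namespace: .*^namespace: ${ns}^" \
      | kubectl apply -f -
  # operator Deployment: pin the 1.18.0 image, raise log level, disable telemetry
  curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${tag}/deploy/cw-operator.yaml \
      | sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:1.18.0^' \
      | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' \
      | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' \
      | kubectl apply -n ${ns} -f -
  # then wait for the operator pod to come up, roughly:
  kubectl -n ${ns} wait --for=condition=Ready pod \
      -l app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=480s
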
+ cat /tmp/tmp.Fw2XkYIzhF + rm /tmp/tmp.IHGqKRAfMR /tmp/tmp.Fw2XkYIzhF + return 0 + deploy_operator_gh v1.18.0 + local git_tag=v1.18.0 + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- ++ kubectl_bin get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hxWHBH6PIM +++ mktemp ++ local LAST_ERR=/tmp/tmp.s8nXDMUleF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hxWHBH6PIM ++ cat /tmp/tmp.s8nXDMUleF ++ rm /tmp/tmp.hxWHBH6PIM /tmp/tmp.s8nXDMUleF ++ return 0 + [[ -n perconaxtradbclusters.pxc.percona.com ]] ++ kubectl_bin get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-18-0")].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VI6vx6edQY +++ mktemp ++ local LAST_ERR=/tmp/tmp.yRLmjNen66 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-18-0")].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VI6vx6edQY ++ cat /tmp/tmp.yRLmjNen66 ++ rm /tmp/tmp.VI6vx6edQY /tmp/tmp.yRLmjNen66 ++ return 0 + [[ -n '' ]] + kubectl_bin apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.dqVb5JtKyi ++ mktemp + local LAST_ERR=/tmp/tmp.oNdpAo5L3D + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dqVb5JtKyi + cat /tmp/tmp.oNdpAo5L3D + rm /tmp/tmp.dqVb5JtKyi /tmp/tmp.oNdpAo5L3D + return 0 + local rbac_yaml=rbac + local operator_yaml=operator.yaml + '[' -n pxc-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator.yaml + apply_rbac_gh cw-rbac v1.18.0 + local operator_namespace=pxc-operator + local rbac=cw-rbac + local git_tag=v1.18.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cw-rbac.yaml + /usr/bin/sed -i -e 's^namespace: .*^namespace: pxc-operator^' /tmp/tmp.z4cN68TT63/rbac_v1.18.0.yaml + kubectl_bin apply -f /tmp/tmp.z4cN68TT63/rbac_v1.18.0.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TMzqyOjBZz ++ mktemp + local LAST_ERR=/tmp/tmp.YojC2ycdk5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.z4cN68TT63/rbac_v1.18.0.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TMzqyOjBZz clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.YojC2ycdk5 + rm /tmp/tmp.TMzqyOjBZz /tmp/tmp.YojC2ycdk5 + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cw-operator.yaml + cat /tmp/tmp.z4cN68TT63/cw-operator.yaml_v1.18.0.yaml + sed -e 's^image: .*^image: 
perconalab/percona-xtradb-cluster-operator:1.18.0^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n pxc-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2wKWRZuMD9 ++ mktemp + local LAST_ERR=/tmp/tmp.P44tdbn5O9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2wKWRZuMD9 deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.P44tdbn5O9 + rm /tmp/tmp.2wKWRZuMD9 /tmp/tmp.P44tdbn5O9 + return 0 + sleep 2 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.upIvUP3xAB +++ mktemp ++ local LAST_ERR=/tmp/tmp.9iGvSuTTUZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.upIvUP3xAB ++ cat /tmp/tmp.9iGvSuTTUZ ++ rm /tmp/tmp.upIvUP3xAB /tmp/tmp.9iGvSuTTUZ ++ return 0 + wait_pod percona-xtradb-cluster-operator-85f65db574-jt58w + local pod=percona-xtradb-cluster-operator-85f65db574-jt58w + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-85f65db574-jt58w ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-85f65db574-jt58w condition met waiting for pod/percona-xtradb-cluster-operator-85f65db574-jt58w to become Ready.Ok + create_namespace upgrade-haproxy-25431 + local namespace=upgrade-haproxy-25431 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces upgrade-haproxy-25431' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-haproxy-25431 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-haproxy-25431 + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.32NJxEtkbK ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.s8JbwOKL3Y + local LAST_ERR=/tmp/tmp.OkEkfd70fo + local exit_status=0 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.a2kb2Ghrrm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-25431 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.32NJxEtkbK + cat /tmp/tmp.OkEkfd70fo + rm /tmp/tmp.32NJxEtkbK /tmp/tmp.OkEkfd70fo + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-25431 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-25431 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.s8JbwOKL3Y + cat /tmp/tmp.a2kb2Ghrrm Error from server (NotFound): namespaces "upgrade-haproxy-25431" not found + rm /tmp/tmp.s8JbwOKL3Y /tmp/tmp.a2kb2Ghrrm + return 1 + : + wait_for_delete namespace/upgrade-haproxy-25431 + local res=namespace/upgrade-haproxy-25431 + echo -n 'waiting for namespace/upgrade-haproxy-25431 to be deleted' waiting for namespace/upgrade-haproxy-25431 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "upgrade-haproxy-25431" not found + desc 'create namespace upgrade-haproxy-25431' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-haproxy-25431 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-haproxy-25431 ++ mktemp + local LAST_OUT=/tmp/tmp.Wtpz23QKeP ++ mktemp + local LAST_ERR=/tmp/tmp.9znR1fiz7y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-haproxy-25431 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Wtpz23QKeP namespace/upgrade-haproxy-25431 created + cat /tmp/tmp.9znR1fiz7y + rm /tmp/tmp.Wtpz23QKeP /tmp/tmp.9znR1fiz7y + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.t8yxe8CQpi +++ mktemp ++ local LAST_ERR=/tmp/tmp.9PaJ5OD5rn ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.t8yxe8CQpi ++ 
cat /tmp/tmp.9PaJ5OD5rn ++ rm /tmp/tmp.t8yxe8CQpi /tmp/tmp.9PaJ5OD5rn ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=upgrade-haproxy-25431 ++ mktemp + local LAST_OUT=/tmp/tmp.G11HxmyAiY ++ mktemp + local LAST_ERR=/tmp/tmp.SkxXaj4bET + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=upgrade-haproxy-25431 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.G11HxmyAiY Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1" modified. + cat /tmp/tmp.SkxXaj4bET + rm /tmp/tmp.G11HxmyAiY /tmp/tmp.SkxXaj4bET + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MGOTDPGdB6 ++ mktemp + local LAST_ERR=/tmp/tmp.o4ZTCm0GS6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MGOTDPGdB6 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.o4ZTCm0GS6 + rm /tmp/tmp.MGOTDPGdB6 /tmp/tmp.o4ZTCm0GS6 + return 0 + start_minio + deploy_helm upgrade-haproxy-25431 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "chaos-mesh" chart repository Update Complete. 
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Fri Oct 10 10:30:02 2025 NAMESPACE: upgrade-haproxy-25431 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-haproxy-25431.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-haproxy-25431 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-haproxy-25431 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-haproxy-25431 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-haproxy-25431 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1fJKNZ1X7g +++ mktemp ++ local LAST_ERR=/tmp/tmp.og6XovT5r5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1fJKNZ1X7g ++ cat /tmp/tmp.og6XovT5r5 ++ rm /tmp/tmp.1fJKNZ1X7g /tmp/tmp.og6XovT5r5 ++ return 0 + MINIO_POD=minio-service-55fcc5d75f-g4tpb + wait_pod minio-service-55fcc5d75f-g4tpb + local pod=minio-service-55fcc5d75f-g4tpb + local max_retry=480 + local ns= ++ echo minio-service-55fcc5d75f-g4tpb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/minio-service-55fcc5d75f-g4tpb condition met waiting for pod/minio-service-55fcc5d75f-g4tpb to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.02cKivQJtN ++ mktemp + local LAST_ERR=/tmp/tmp.CUYUVdTLvW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.02cKivQJtN make_bucket: operator-testing pod "aws-cli" deleted from upgrade-haproxy-25431 namespace + cat /tmp/tmp.CUYUVdTLvW + rm /tmp/tmp.02cKivQJtN /tmp/tmp.CUYUVdTLvW + return 0 + local proxy=haproxy + local cr_yaml=/tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml + prepare_cr_yaml /tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml haproxy upgrade-haproxy 3 v1.18.0 + local cr_yaml=/tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml + local proxy=haproxy + local cluster=upgrade-haproxy + local cluster_size=3 + local git_tag=v1.18.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.18.0/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-haproxy" | .spec.secretsName = "my-cluster-secrets" | .spec.vaultSecretName = "some-name-vault" | .spec.sslSecretName = "some-name-ssl" | .spec.sslInternalSecretName = "some-name-ssl-internal" | .spec.upgradeOptions.apply = "disabled" | .spec.pxc.size = 3 | .spec.proxysql.size = 3 | .spec.haproxy.size = 3 | .spec.pxc.image = "-pxc" | .spec.proxysql.image = "-proxysql" | .spec.haproxy.image = "-haproxy" | .spec.backup.image = "-backup" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service.#namespace:9000/" | .spec.backup.storages.minio.type = "s3" | .spec.pmm.image = "-pmm" ' - + [[ haproxy == \h\a\p\r\o\x\y ]] + yq -i eval ' .spec.haproxy.enabled = true | .spec.proxysql.enabled = false ' /tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml + [[ -n '' ]] + spinup_pxc upgrade-haproxy /tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml 3 30 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets_without_tls.yml + local cluster=upgrade-haproxy + local config=/tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml + local size=3 + local sleep=30 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DQYBgpoZL6 ++ mktemp + local LAST_ERR=/tmp/tmp.LAN5cdpiZ5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DQYBgpoZL6 secret/my-cluster-secrets created + cat /tmp/tmp.LAN5cdpiZ5 + rm /tmp/tmp.DQYBgpoZL6 /tmp/tmp.LAN5cdpiZ5 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' + local LAST_OUT=/tmp/tmp.9CvMwY5td8 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:3.3.1#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.3#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtrabackup:2.4.29#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.15#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: percona/fluentbit:4.0.1#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.18.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-haproxy-25431~ + local LAST_ERR=/tmp/tmp.wFZLG4ku3R + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9CvMwY5td8 deployment.apps/pxc-client created + cat /tmp/tmp.wFZLG4ku3R + rm /tmp/tmp.9CvMwY5td8 /tmp/tmp.wFZLG4ku3R + return 0 + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ upgrade-haproxy == \d\e\m\a\n\d\-\b\a\c\k\u\p ]] + [[ upgrade-haproxy == \d\e\m\a\n\d\-\b\a\c\k\u\p\-\c\l\o\u\d ]] + apply_config /tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml + cat /tmp/tmp.z4cN68TT63/cr_1.18.0_haproxy.yaml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.18.0#' + local 
LAST_OUT=/tmp/tmp.5BXR6l6Jnm + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:5.7.44-31.65#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.15#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.3#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-haproxy-25431~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: percona/fluentbit:4.0.1#' + local LAST_ERR=/tmp/tmp.ROMj8ronJS + local exit_status=0 + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:3.3.1#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtrabackup:2.4.29#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5BXR6l6Jnm perconaxtradbcluster.pxc.percona.com/upgrade-haproxy created + cat /tmp/tmp.ROMj8ronJS + rm /tmp/tmp.5BXR6l6Jnm /tmp/tmp.ROMj8ronJS + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy upgrade-haproxy ++ local target_cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tWrC51LQs5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aW9hWiDB7p +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.tWrC51LQs5 +++ cat /tmp/tmp.aW9hWiDB7p +++ rm /tmp/tmp.tWrC51LQs5 /tmp/tmp.aW9hWiDB7p +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo upgrade-haproxy-haproxy ++ return + local proxy=upgrade-haproxy-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-25431 ++ mktemp + local LAST_OUT=/tmp/tmp.yTirJ2fdMw ++ mktemp + local LAST_ERR=/tmp/tmp.JCKfya5Z5H + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-25431 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-25431 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-25431 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.yTirJ2fdMw + cat /tmp/tmp.JCKfya5Z5H error: no matching resources found + rm /tmp/tmp.yTirJ2fdMw /tmp/tmp.JCKfya5Z5H + return 1 + true + wait_for_running upgrade-haproxy-haproxy 1 + local name=upgrade-haproxy-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster 
----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-haproxy-0 480 + local pod=upgrade-haproxy-haproxy-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace Error from server (NotFound): pods "upgrade-haproxy-haproxy-0" not found waiting for pod/upgrade-haproxy-haproxy-0 to become Ready......................................Ok + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok + sleep 30 ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.CRG0YCryeO +++ mktemp ++ local LAST_ERR=/tmp/tmp.ieexrgJmvp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CRG0YCryeO ++ cat /tmp/tmp.ieexrgJmvp ++ rm /tmp/tmp.CRG0YCryeO /tmp/tmp.ieexrgJmvp ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] ++ is_keyring_plugin_in_use upgrade-haproxy ++ local cluster=upgrade-haproxy ++ kubectl_bin exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zBH6E2cIY3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ly47rYMGoe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat 
/etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zBH6E2cIY3 ++ cat /tmp/tmp.Ly47rYMGoe Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.zBH6E2cIY3 /tmp/tmp.Ly47rYMGoe ++ return 0 + [[ -n '' ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mrhfO03wry +++ mktemp ++ local LAST_ERR=/tmp/tmp.B7NLue0WNE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mrhfO03wry ++ cat /tmp/tmp.B7NLue0WNE ++ rm /tmp/tmp.mrhfO03wry /tmp/tmp.B7NLue0WNE ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h upgrade-haproxy-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XMnAVnZlqP +++ mktemp ++ local LAST_ERR=/tmp/tmp.yzkYOScAnV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XMnAVnZlqP ++ cat /tmp/tmp.yzkYOScAnV ++ rm /tmp/tmp.XMnAVnZlqP /tmp/tmp.yzkYOScAnV ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ 
percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U1OeEBvaMr +++ mktemp ++ local LAST_ERR=/tmp/tmp.6K0KGUghbv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.U1OeEBvaMr ++ cat /tmp/tmp.6K0KGUghbv ++ rm /tmp/tmp.U1OeEBvaMr /tmp/tmp.6K0KGUghbv ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-65c4d67b5b-4g5gb ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rBZGPQLYUE +++ mktemp ++ local LAST_ERR=/tmp/tmp.LLalYF2LdL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rBZGPQLYUE ++ cat /tmp/tmp.LLalYF2LdL ++ rm /tmp/tmp.rBZGPQLYUE /tmp/tmp.LLalYF2LdL ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + 
local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M7pn0kEWJP +++ mktemp ++ local LAST_ERR=/tmp/tmp.PyiOiGBqpA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.M7pn0kEWJP ++ cat /tmp/tmp.PyiOiGBqpA ++ rm /tmp/tmp.M7pn0kEWJP /tmp/tmp.PyiOiGBqpA ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql ++ is_keyring_plugin_in_use upgrade-haproxy ++ local cluster=upgrade-haproxy ++ kubectl_bin exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.13VEyMHurB +++ mktemp ++ local LAST_ERR=/tmp/tmp.tMxODJsbwW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.13VEyMHurB ++ cat /tmp/tmp.tMxODJsbwW Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.13VEyMHurB /tmp/tmp.tMxODJsbwW ++ return 0 + '[' '' ']' + compare_generation 1 haproxy upgrade-haproxy + local generation=1 + local proxy=haproxy + local cluster=upgrade-haproxy + local current_generation + [[ haproxy == \h\a\p\r\o\x\y ]] + containers=(pxc haproxy) + for container in '"${containers[@]}"' + check_generation 1 pxc upgrade-haproxy + local generation=1 + local container=pxc + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CusBNJKdUJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Abtczl63TR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CusBNJKdUJ ++ cat /tmp/tmp.Abtczl63TR ++ rm /tmp/tmp.CusBNJKdUJ /tmp/tmp.Abtczl63TR ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' + check_generation 1 haproxy upgrade-haproxy + local generation=1 + local container=haproxy + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6zD9q9vyh5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UtbyG5YVo4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6zD9q9vyh5 ++ cat /tmp/tmp.UtbyG5YVo4 ++ rm /tmp/tmp.6zD9q9vyh5 /tmp/tmp.UtbyG5YVo4 ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + run_backup upgrade-haproxy on-demand-backup-minio + local cluster=upgrade-haproxy + local backup=on-demand-backup-minio + log 'run pxc-backup/on-demand-backup-minio' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-10-10T10:36:33+0000]' run pxc-backup/on-demand-backup-minio [2025-10-10T10:36:33+0000] run pxc-backup/on-demand-backup-minio + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/conf/on-demand-backup-minio.yml ++ mktemp + local LAST_OUT=/tmp/tmp.cCRPae3wsQ ++ mktemp + local LAST_ERR=/tmp/tmp.G2KLhWoHqK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/conf/on-demand-backup-minio.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cCRPae3wsQ perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-minio created + cat /tmp/tmp.G2KLhWoHqK + rm /tmp/tmp.cCRPae3wsQ 
/tmp/tmp.G2KLhWoHqK + return 0 + wait_backup on-demand-backup-minio + local backup=on-demand-backup-minio + local status=Succeeded + set +o xtrace waiting for pxc-backup/on-demand-backup-minio to reach Succeeded state.......................Succeeded + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.EHHKEGHoCK ++ mktemp + local LAST_ERR=/tmp/tmp.bpziX2ATgt + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EHHKEGHoCK customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.bpziX2ATgt + rm /tmp/tmp.EHHKEGHoCK /tmp/tmp.bpziX2ATgt + return 0 + [[ -n pxc-operator ]] + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.akXj9NfmzO ++ mktemp + local LAST_ERR=/tmp/tmp.da15n5udkg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.akXj9NfmzO clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.da15n5udkg + rm /tmp/tmp.akXj9NfmzO /tmp/tmp.da15n5udkg + return 0 + kubectl_bin patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7"}]}}}}' -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WBqOtsoLLg ++ mktemp + local LAST_ERR=/tmp/tmp.la6tTTeBtO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7"}]}}}}' -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WBqOtsoLLg deployment.apps/percona-xtradb-cluster-operator patched + cat /tmp/tmp.la6tTTeBtO + rm /tmp/tmp.WBqOtsoLLg /tmp/tmp.la6tTTeBtO + return 0 + kubectl_bin rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qQEo6D7I1h ++ mktemp + local LAST_ERR=/tmp/tmp.WTUm1dhy20 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qQEo6D7I1h Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 of 1 updated replicas are available... 
deployment "percona-xtradb-cluster-operator" successfully rolled out + cat /tmp/tmp.WTUm1dhy20 + rm /tmp/tmp.qQEo6D7I1h /tmp/tmp.WTUm1dhy20 + return 0 + sleep 10 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- + local i=0 + local max=60 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h1LdKEYXNV +++ mktemp ++ local LAST_ERR=/tmp/tmp.9OSoa1sNM9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.h1LdKEYXNV ++ cat /tmp/tmp.9OSoa1sNM9 ++ rm /tmp/tmp.h1LdKEYXNV /tmp/tmp.9OSoa1sNM9 ++ return 0 + [[ 1 -eq 1 ]] + '[' -n pxc-operator ']' ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6p9X1GIzcX +++ mktemp ++ local LAST_ERR=/tmp/tmp.VzXImnY4eD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6p9X1GIzcX ++ cat /tmp/tmp.VzXImnY4eD ++ rm /tmp/tmp.6p9X1GIzcX /tmp/tmp.VzXImnY4eD ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.DTi2kF70GV ++ mktemp + local LAST_ERR=/tmp/tmp.XXV6XmV8b7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DTi2kF70GV Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1" modified. 
+ cat /tmp/tmp.XXV6XmV8b7 + rm /tmp/tmp.DTi2kF70GV /tmp/tmp.XXV6XmV8b7 + return 0 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ awk '{print $1}' +++ mktemp ++ grep perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 ++ local LAST_OUT=/tmp/tmp.UIV2XYa0uY +++ mktemp ++ local LAST_ERR=/tmp/tmp.Is32uaMHr2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UIV2XYa0uY ++ cat /tmp/tmp.Is32uaMHr2 ++ rm /tmp/tmp.UIV2XYa0uY /tmp/tmp.Is32uaMHr2 ++ return 0 + wait_pod percona-xtradb-cluster-operator-5b676b9b4d-plfxl + local pod=percona-xtradb-cluster-operator-5b676b9b4d-plfxl + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-5b676b9b4d-plfxl ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-5b676b9b4d-plfxl condition met waiting for pod/percona-xtradb-cluster-operator-5b676b9b4d-plfxl to become Ready.Ok ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.9IG2LmdWaE +++ mktemp ++ local LAST_ERR=/tmp/tmp.JZWYRCowQQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9IG2LmdWaE ++ cat /tmp/tmp.JZWYRCowQQ ++ rm /tmp/tmp.9IG2LmdWaE /tmp/tmp.JZWYRCowQQ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=upgrade-haproxy-25431 ++ mktemp + local LAST_OUT=/tmp/tmp.iBmXKImWtb ++ mktemp + local LAST_ERR=/tmp/tmp.oF92aoKSbL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1 --namespace=upgrade-haproxy-25431 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iBmXKImWtb Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2207-f8e092d7-9-cluster1" modified. 
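[note] The verification that follows is a set of jsonpath lookups against the custom resource and its StatefulSets: after an operator-only upgrade the component images should still be the 1.18.0 defaults and both StatefulSet generations should remain 1. A minimal form of those checks, using the cluster name and namespace from this run:

  kubectl -n upgrade-haproxy-25431 get pxc upgrade-haproxy -o jsonpath='{.spec.pxc.image}'        # percona/percona-xtradb-cluster:5.7.44-31.65
  kubectl -n upgrade-haproxy-25431 get pxc upgrade-haproxy -o jsonpath='{.spec.haproxy.image}'    # percona/haproxy:2.8.15
  kubectl -n upgrade-haproxy-25431 get pxc upgrade-haproxy -o jsonpath='{.spec.backup.image}'     # percona/percona-xtrabackup:2.4.29
  kubectl -n upgrade-haproxy-25431 get statefulset upgrade-haproxy-pxc -o jsonpath='{.metadata.generation}'      # expect 1
  kubectl -n upgrade-haproxy-25431 get statefulset upgrade-haproxy-haproxy -o jsonpath='{.metadata.generation}'  # expect 1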
+ cat /tmp/tmp.oF92aoKSbL + rm /tmp/tmp.iBmXKImWtb /tmp/tmp.oF92aoKSbL + return 0 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-haproxy 3 + local cluster=upgrade-haproxy + local cluster_size=3 + wait_cluster_consistency upgrade-haproxy 3 + local cluster_name=upgrade-haproxy + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-haproxy ++ local cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xjbiDPLuRk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PVecfgqx4c +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.xjbiDPLuRk +++ cat /tmp/tmp.PVecfgqx4c +++ rm /tmp/tmp.xjbiDPLuRk /tmp/tmp.PVecfgqx4c +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ceOi6yoMlY +++ mktemp ++ local LAST_ERR=/tmp/tmp.6TcY8crTUf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ceOi6yoMlY ++ cat /tmp/tmp.6TcY8crTUf ++ rm /tmp/tmp.ceOi6yoMlY /tmp/tmp.6TcY8crTUf ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/upgrade-haproxy to be ready' waiting for pxc/upgrade-haproxy to be ready++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gb5I6dSBE3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TMNvzhboiy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Gb5I6dSBE3 ++ cat /tmp/tmp.TMNvzhboiy ++ rm /tmp/tmp.Gb5I6dSBE3 /tmp/tmp.TMNvzhboiy ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PbZ8uKLdbJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.vCyOGMJIBa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PbZ8uKLdbJ ++ cat /tmp/tmp.vCyOGMJIBa ++ rm /tmp/tmp.PbZ8uKLdbJ /tmp/tmp.vCyOGMJIBa ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-haproxy +++ local cluster_name=upgrade-haproxy ++++ get_proxy upgrade-haproxy ++++ local target_cluster=upgrade-haproxy +++++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ilb03Z7ewA ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ZlxSwptZyq +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-haproxy -o 
'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.ilb03Z7ewA +++++ cat /tmp/tmp.ZlxSwptZyq +++++ rm /tmp/tmp.ilb03Z7ewA /tmp/tmp.ZlxSwptZyq +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-haproxy-haproxy ++++ return +++ local cluster_proxy=upgrade-haproxy-haproxy +++ echo haproxy ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GmJCGQ12Ne +++ mktemp ++ local LAST_ERR=/tmp/tmp.tdefFVpV2v ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.GmJCGQ12Ne ++ cat /tmp/tmp.tdefFVpV2v ++ rm /tmp/tmp.GmJCGQ12Ne /tmp/tmp.tdefFVpV2v ++ return 0 + [[ 3 == \3 ]] + echo + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-1 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6VSxBcbQ0J +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.9sK7wbuKKO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6VSxBcbQ0J ++ cat /tmp/tmp.9sK7wbuKKO ++ rm /tmp/tmp.6VSxBcbQ0J /tmp/tmp.9sK7wbuKKO ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' -s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S0vdILYmcx +++ mktemp ++ local LAST_ERR=/tmp/tmp.qk9SL40mAp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.S0vdILYmcx ++ cat /tmp/tmp.qk9SL40mAp ++ rm /tmp/tmp.S0vdILYmcx /tmp/tmp.qk9SL40mAp ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8n3IgA7SSD +++ mktemp ++ local LAST_ERR=/tmp/tmp.VRTPgzwKO0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8n3IgA7SSD ++ cat /tmp/tmp.VRTPgzwKO0 ++ rm /tmp/tmp.8n3IgA7SSD /tmp/tmp.VRTPgzwKO0 ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qr1It8R5ad +++ mktemp ++ local LAST_ERR=/tmp/tmp.lGgdw1aPz0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qr1It8R5ad ++ cat /tmp/tmp.lGgdw1aPz0 ++ rm /tmp/tmp.qr1It8R5ad /tmp/tmp.lGgdw1aPz0 ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 == perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oOHVOcQHJ2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gIvgwt6Tzk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oOHVOcQHJ2 ++ cat /tmp/tmp.gIvgwt6Tzk ++ rm /tmp/tmp.oOHVOcQHJ2 /tmp/tmp.gIvgwt6Tzk ++ return 0 + [[ percona/proxysql2:2.7.3 == percona/proxysql2:2.7.3 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l52sfAHKb1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uxnLWEk5SZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.l52sfAHKb1 ++ cat /tmp/tmp.uxnLWEk5SZ ++ rm /tmp/tmp.l52sfAHKb1 /tmp/tmp.uxnLWEk5SZ ++ return 0 + [[ percona/haproxy:2.8.15 == percona/haproxy:2.8.15 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zOYIp5I88B +++ mktemp ++ local LAST_ERR=/tmp/tmp.hGuJgLoXim ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zOYIp5I88B ++ cat /tmp/tmp.hGuJgLoXim ++ rm /tmp/tmp.zOYIp5I88B /tmp/tmp.hGuJgLoXim ++ return 0 + [[ percona/percona-xtrabackup:2.4.29 == percona/percona-xtrabackup:2.4.29 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dfIGWWmSzt +++ mktemp ++ local LAST_ERR=/tmp/tmp.D78JfRF8nJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dfIGWWmSzt ++ cat /tmp/tmp.D78JfRF8nJ ++ rm /tmp/tmp.dfIGWWmSzt /tmp/tmp.D78JfRF8nJ ++ return 0 + [[ percona/pmm-client:3.3.1 == percona/pmm-client:3.3.1 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.logcollector.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VXk2Slnu5n +++ mktemp ++ local LAST_ERR=/tmp/tmp.iZCUW8RhSa ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.logcollector.image}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VXk2Slnu5n ++ cat /tmp/tmp.iZCUW8RhSa ++ rm /tmp/tmp.VXk2Slnu5n /tmp/tmp.iZCUW8RhSa ++ return 0 + [[ percona/fluentbit:4.0.1 == percona/fluentbit:4.0.1 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CYuNcCx26v +++ mktemp ++ local LAST_ERR=/tmp/tmp.XlhxaSZgWx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CYuNcCx26v ++ cat /tmp/tmp.XlhxaSZgWx ++ rm /tmp/tmp.CYuNcCx26v /tmp/tmp.XlhxaSZgWx ++ return 0 + [[ percona/percona-xtradb-cluster:5.7.44-31.65 == percona/percona-xtradb-cluster:5.7.44-31.65 ]] + : Operator image has been updated correctly + compare_generation 1 haproxy upgrade-haproxy + local generation=1 + local proxy=haproxy + local cluster=upgrade-haproxy + local current_generation + [[ haproxy == \h\a\p\r\o\x\y ]] + containers=(pxc haproxy) + for container in '"${containers[@]}"' + check_generation 1 pxc upgrade-haproxy + local generation=1 + local container=pxc + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MAOdD9XTB7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zVXUuViewd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MAOdD9XTB7 ++ cat /tmp/tmp.zVXUuViewd ++ rm /tmp/tmp.MAOdD9XTB7 /tmp/tmp.zVXUuViewd ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' + check_generation 1 haproxy upgrade-haproxy + local generation=1 + local container=haproxy + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RX2MMNy2gv +++ mktemp ++ local LAST_ERR=/tmp/tmp.sPyGrPiUC8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RX2MMNy2gv ++ cat /tmp/tmp.sPyGrPiUC8 ++ rm /tmp/tmp.RX2MMNy2gv /tmp/tmp.sPyGrPiUC8 ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch pxc images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch pxc images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch pxc upgrade-haproxy --type=merge --patch '{ "spec": { "crVersion": "1.19.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "logcollector": { "image": "perconalab/percona-xtradb-cluster-operator:main-logcollector" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.tccAx3dBmY ++ mktemp + local LAST_ERR=/tmp/tmp.5Oatz2qW1S + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc 
upgrade-haproxy --type=merge --patch '{ "spec": { "crVersion": "1.19.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "logcollector": { "image": "perconalab/percona-xtradb-cluster-operator:main-logcollector" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tccAx3dBmY perconaxtradbcluster.pxc.percona.com/upgrade-haproxy patched + cat /tmp/tmp.5Oatz2qW1S + rm /tmp/tmp.tccAx3dBmY /tmp/tmp.5Oatz2qW1S + return 0 + sleep 10 + desc 'check images and generation after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after full upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-haproxy 3 + local cluster=upgrade-haproxy + local cluster_size=3 + wait_cluster_consistency upgrade-haproxy 3 + local cluster_name=upgrade-haproxy + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-haproxy ++ local cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XtNsNOrvAj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Y3HRNe8SeH +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.XtNsNOrvAj +++ cat /tmp/tmp.Y3HRNe8SeH +++ rm /tmp/tmp.XtNsNOrvAj /tmp/tmp.Y3HRNe8SeH +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ttceBWPzxP +++ mktemp ++ local LAST_ERR=/tmp/tmp.EcTjThdaFx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ttceBWPzxP ++ cat /tmp/tmp.EcTjThdaFx ++ rm /tmp/tmp.ttceBWPzxP /tmp/tmp.EcTjThdaFx ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/upgrade-haproxy to be ready' waiting for pxc/upgrade-haproxy to be ready++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hn2VZFyzk6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kZ3UrV1m0F ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Hn2VZFyzk6 ++ cat /tmp/tmp.kZ3UrV1m0F ++ rm /tmp/tmp.Hn2VZFyzk6 /tmp/tmp.kZ3UrV1m0F ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
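[note] The dotted output below is wait_cluster_consistency polling the custom resource while the rolling restart triggered by the patch completes: .status.state is read every 5 seconds (up to 300 iterations in this run) until it reports ready, after which .status.pxc.ready and .status.haproxy.ready are compared against the expected size of 3. A minimal equivalent of that wait, assuming the same namespace and cluster name:

  # poll until the PXC custom resource reports ready, then confirm replica counts
  until [ "$(kubectl -n upgrade-haproxy-25431 get pxc upgrade-haproxy -o jsonpath='{.status.state}')" = "ready" ]; do
      echo -n .
      sleep 5
  done
  kubectl -n upgrade-haproxy-25431 get pxc upgrade-haproxy -o jsonpath='{.status.pxc.ready}'      # expect 3
  kubectl -n upgrade-haproxy-25431 get pxc upgrade-haproxy -o jsonpath='{.status.haproxy.ready}'  # expect 3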
.+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xm4bMEbB3b +++ mktemp ++ local LAST_ERR=/tmp/tmp.9PlrNf4XIE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xm4bMEbB3b ++ cat /tmp/tmp.9PlrNf4XIE ++ rm /tmp/tmp.xm4bMEbB3b /tmp/tmp.9PlrNf4XIE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.me7XXHnMlw +++ mktemp ++ local LAST_ERR=/tmp/tmp.5GMJ1tkhNf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.me7XXHnMlw ++ cat /tmp/tmp.5GMJ1tkhNf ++ rm /tmp/tmp.me7XXHnMlw /tmp/tmp.5GMJ1tkhNf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YllVqfkc7D +++ mktemp ++ local LAST_ERR=/tmp/tmp.sLYn4Js2lm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YllVqfkc7D ++ cat /tmp/tmp.sLYn4Js2lm ++ rm /tmp/tmp.YllVqfkc7D /tmp/tmp.sLYn4Js2lm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kyrbcP3kLS +++ mktemp ++ local LAST_ERR=/tmp/tmp.bTD0v15ceE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kyrbcP3kLS ++ cat /tmp/tmp.bTD0v15ceE ++ rm /tmp/tmp.kyrbcP3kLS /tmp/tmp.bTD0v15ceE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Myq67ELjzK +++ mktemp ++ local LAST_ERR=/tmp/tmp.UlOlMYUaKw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Myq67ELjzK ++ cat /tmp/tmp.UlOlMYUaKw ++ rm /tmp/tmp.Myq67ELjzK /tmp/tmp.UlOlMYUaKw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8jibOea25F +++ mktemp ++ local LAST_ERR=/tmp/tmp.YV16bUt447 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8jibOea25F ++ cat /tmp/tmp.YV16bUt447 ++ rm /tmp/tmp.8jibOea25F /tmp/tmp.YV16bUt447 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G9VOKB71WH +++ mktemp ++ local LAST_ERR=/tmp/tmp.5DDVlpim1e ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.G9VOKB71WH ++ cat /tmp/tmp.5DDVlpim1e ++ rm /tmp/tmp.G9VOKB71WH /tmp/tmp.5DDVlpim1e ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VxvAP1Wccg +++ mktemp ++ local LAST_ERR=/tmp/tmp.hBskOyhdN8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VxvAP1Wccg ++ cat /tmp/tmp.hBskOyhdN8 ++ rm /tmp/tmp.VxvAP1Wccg /tmp/tmp.hBskOyhdN8 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qLHFg7o4sA +++ mktemp ++ local LAST_ERR=/tmp/tmp.ayeQ0uVis2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qLHFg7o4sA ++ cat /tmp/tmp.ayeQ0uVis2 ++ rm /tmp/tmp.qLHFg7o4sA /tmp/tmp.ayeQ0uVis2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hxyAJ6BBMZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.UEa0ifJbJY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hxyAJ6BBMZ ++ cat /tmp/tmp.UEa0ifJbJY ++ rm /tmp/tmp.hxyAJ6BBMZ /tmp/tmp.UEa0ifJbJY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KLDV9PVbJO +++ mktemp ++ local LAST_ERR=/tmp/tmp.zsvZdxincx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KLDV9PVbJO ++ cat /tmp/tmp.zsvZdxincx ++ rm /tmp/tmp.KLDV9PVbJO /tmp/tmp.zsvZdxincx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.igYegmpjV9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.73OfOFyF17 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.igYegmpjV9 ++ cat /tmp/tmp.73OfOFyF17 ++ rm /tmp/tmp.igYegmpjV9 /tmp/tmp.73OfOFyF17 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DV5LRLHbVG +++ mktemp ++ local LAST_ERR=/tmp/tmp.WUKWmG3vAz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DV5LRLHbVG ++ cat /tmp/tmp.WUKWmG3vAz ++ rm /tmp/tmp.DV5LRLHbVG /tmp/tmp.WUKWmG3vAz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dMQu8t2iYy +++ mktemp ++ local LAST_ERR=/tmp/tmp.iPXBYkfht6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dMQu8t2iYy ++ cat /tmp/tmp.iPXBYkfht6 ++ rm /tmp/tmp.dMQu8t2iYy /tmp/tmp.iPXBYkfht6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9l4U4wjm5L +++ mktemp ++ local LAST_ERR=/tmp/tmp.hysmHQidKc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9l4U4wjm5L ++ cat /tmp/tmp.hysmHQidKc ++ rm /tmp/tmp.9l4U4wjm5L /tmp/tmp.hysmHQidKc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 15 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UddwE7rlj6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EVisOXr2bF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UddwE7rlj6 ++ cat /tmp/tmp.EVisOXr2bF ++ rm /tmp/tmp.UddwE7rlj6 /tmp/tmp.EVisOXr2bF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 16 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.haiQslBykE +++ mktemp ++ local LAST_ERR=/tmp/tmp.rGOKEReOuc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.haiQslBykE ++ cat /tmp/tmp.rGOKEReOuc ++ rm /tmp/tmp.haiQslBykE /tmp/tmp.rGOKEReOuc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 17 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1hxiuaL9NK +++ mktemp ++ local LAST_ERR=/tmp/tmp.MfVSznxMtV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1hxiuaL9NK ++ cat /tmp/tmp.MfVSznxMtV ++ rm /tmp/tmp.1hxiuaL9NK /tmp/tmp.MfVSznxMtV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 18 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.po3tfZDWzs +++ mktemp ++ local LAST_ERR=/tmp/tmp.D6HJMrbye2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.po3tfZDWzs ++ cat /tmp/tmp.D6HJMrbye2 ++ rm /tmp/tmp.po3tfZDWzs /tmp/tmp.D6HJMrbye2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 19 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Al3Q0GYHaG +++ mktemp ++ local LAST_ERR=/tmp/tmp.AgKQi4z5S6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Al3Q0GYHaG ++ cat /tmp/tmp.AgKQi4z5S6 ++ rm /tmp/tmp.Al3Q0GYHaG /tmp/tmp.AgKQi4z5S6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 20 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FKFdxhy2Pv +++ mktemp ++ local LAST_ERR=/tmp/tmp.kzGoeneA93 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.FKFdxhy2Pv ++ cat /tmp/tmp.kzGoeneA93 ++ rm /tmp/tmp.FKFdxhy2Pv /tmp/tmp.kzGoeneA93 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 21 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kKjBr67ONB +++ mktemp ++ local LAST_ERR=/tmp/tmp.RTtus2YUVT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kKjBr67ONB ++ cat /tmp/tmp.RTtus2YUVT ++ rm /tmp/tmp.kKjBr67ONB /tmp/tmp.RTtus2YUVT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 22 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U1EBP40XPf +++ mktemp ++ local LAST_ERR=/tmp/tmp.o5TarXwLEH ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.U1EBP40XPf ++ cat /tmp/tmp.o5TarXwLEH ++ rm /tmp/tmp.U1EBP40XPf /tmp/tmp.o5TarXwLEH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 23 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EMujXRBs2X +++ mktemp ++ local LAST_ERR=/tmp/tmp.5ZTG4LFZ4w ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EMujXRBs2X ++ cat /tmp/tmp.5ZTG4LFZ4w ++ rm /tmp/tmp.EMujXRBs2X /tmp/tmp.5ZTG4LFZ4w ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
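The dots accumulating in the output are the progress indicator of the outer wait loop: every five seconds it re-reads .status.state of the pxc resource and keeps polling until the state is ready or roughly 300 iterations have elapsed. A condensed sketch of that loop follows, assuming the kubectl_bin wrapper sketched above; the function name and timeout message are illustrative, not the suite's actual helper.

wait_cluster_ready() {
    # Poll the PerconaXtraDBCluster resource every 5s until .status.state is "ready".
    local cluster=$1
    local i=0
    local state=""
    until [ "$state" = "ready" ]; do
        if [ "$i" -ge 300 ]; then
            echo "timeout waiting for $cluster to become ready" >&2
            return 1
        fi
        state=$(kubectl_bin get pxc "$cluster" -o 'jsonpath={.status.state}')
        echo -n .
        sleep 5
        i=$((i + 1))
    done
    echo
}

In this log the equivalent loop is polling the upgrade-haproxy cluster, which reaches the ready state after roughly 25 iterations.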
.+ sleep 5 + [[ 24 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DMBQQXVUy6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CXjYAIcEKz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DMBQQXVUy6 ++ cat /tmp/tmp.CXjYAIcEKz ++ rm /tmp/tmp.DMBQQXVUy6 /tmp/tmp.CXjYAIcEKz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 25 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oFVEmUzo9c +++ mktemp ++ local LAST_ERR=/tmp/tmp.s5vmelkTc1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oFVEmUzo9c ++ cat /tmp/tmp.s5vmelkTc1 ++ rm /tmp/tmp.oFVEmUzo9c /tmp/tmp.s5vmelkTc1 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.od1GiNLPvi +++ mktemp ++ local LAST_ERR=/tmp/tmp.4OfUjJPHNU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.od1GiNLPvi ++ cat /tmp/tmp.4OfUjJPHNU ++ rm /tmp/tmp.od1GiNLPvi /tmp/tmp.4OfUjJPHNU ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-haproxy +++ local cluster_name=upgrade-haproxy ++++ get_proxy upgrade-haproxy ++++ local target_cluster=upgrade-haproxy +++++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.bAfqYcp8tz ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.jq0zVLBjpO +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.bAfqYcp8tz +++++ cat /tmp/tmp.jq0zVLBjpO +++++ rm /tmp/tmp.bAfqYcp8tz /tmp/tmp.jq0zVLBjpO +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-haproxy-haproxy ++++ return +++ local cluster_proxy=upgrade-haproxy-haproxy +++ echo haproxy ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C5ZgwGwFDZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ExCfX6H6Zh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.C5ZgwGwFDZ ++ cat /tmp/tmp.ExCfX6H6Zh ++ rm /tmp/tmp.C5ZgwGwFDZ /tmp/tmp.ExCfX6H6Zh ++ return 0 + [[ 3 == \3 ]] + echo + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local 
container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PXKXzTRS2Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.eTSgWNccSU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PXKXzTRS2Q ++ cat /tmp/tmp.eTSgWNccSU ++ rm /tmp/tmp.PXKXzTRS2Q /tmp/tmp.eTSgWNccSU ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.skq9sEXvtg +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zgs1D5FtBz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.skq9sEXvtg ++ cat /tmp/tmp.Zgs1D5FtBz ++ rm /tmp/tmp.skq9sEXvtg /tmp/tmp.Zgs1D5FtBz ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dPiGDE4rt8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vPQjPy5zE0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dPiGDE4rt8 ++ cat /tmp/tmp.vPQjPy5zE0 ++ rm /tmp/tmp.dPiGDE4rt8 /tmp/tmp.vPQjPy5zE0 ++ return 0 + client_pod=pxc-client-65c4d67b5b-4g5gb + wait_pod pxc-client-65c4d67b5b-4g5gb + local pod=pxc-client-65c4d67b5b-4g5gb + local max_retry=480 + local ns= ++ echo pxc-client-65c4d67b5b-4g5gb ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-65c4d67b5b-4g5gb condition met waiting for pod/pxc-client-65c4d67b5b-4g5gb to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.z4cN68TT63/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.z4cN68TT63/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SF7nyXMroL +++ mktemp ++ local LAST_ERR=/tmp/tmp.FCDKKbNus9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SF7nyXMroL ++ cat /tmp/tmp.FCDKKbNus9 ++ rm /tmp/tmp.SF7nyXMroL /tmp/tmp.FCDKKbNus9 ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 == perconalab/percona-xtradb-cluster-operator:PR-2207-f8e092d7 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0ni4VRIskL +++ mktemp ++ local LAST_ERR=/tmp/tmp.RWHzWYTuye ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0ni4VRIskL ++ cat /tmp/tmp.RWHzWYTuye ++ rm /tmp/tmp.0ni4VRIskL /tmp/tmp.RWHzWYTuye ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-proxysql == perconalab/percona-xtradb-cluster-operator:main-proxysql ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8MlK1L5KTe +++ mktemp ++ local LAST_ERR=/tmp/tmp.r26QT31Fzr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8MlK1L5KTe ++ cat /tmp/tmp.r26QT31Fzr ++ rm /tmp/tmp.8MlK1L5KTe /tmp/tmp.r26QT31Fzr ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-haproxy == perconalab/percona-xtradb-cluster-operator:main-haproxy ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XoiTcdrLvF +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z2saWzcw8d ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XoiTcdrLvF ++ cat /tmp/tmp.Z2saWzcw8d ++ rm /tmp/tmp.XoiTcdrLvF /tmp/tmp.Z2saWzcw8d ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup == perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T81HMX64ZL +++ mktemp ++ local LAST_ERR=/tmp/tmp.AGajEmLd8l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.T81HMX64ZL ++ cat /tmp/tmp.AGajEmLd8l ++ rm /tmp/tmp.T81HMX64ZL /tmp/tmp.AGajEmLd8l ++ return 0 + [[ perconalab/pmm-client:dev-latest == perconalab/pmm-client:dev-latest ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.logcollector.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pWFfo5TzNz 
+++ mktemp ++ local LAST_ERR=/tmp/tmp.Yn7gZuWpOe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.logcollector.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pWFfo5TzNz ++ cat /tmp/tmp.Yn7gZuWpOe ++ rm /tmp/tmp.pWFfo5TzNz /tmp/tmp.Yn7gZuWpOe ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-logcollector == perconalab/percona-xtradb-cluster-operator:main-logcollector ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.haklpgwZnQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YsTf0gDfw6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.haklpgwZnQ ++ cat /tmp/tmp.YsTf0gDfw6 ++ rm /tmp/tmp.haklpgwZnQ /tmp/tmp.YsTf0gDfw6 ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 == perconalab/percona-xtradb-cluster-operator:main-pxc5.7 ]] + : Cluster images have been updated correctly + compare_generation 2 haproxy upgrade-haproxy + local generation=2 + local proxy=haproxy + local cluster=upgrade-haproxy + local current_generation + [[ haproxy == \h\a\p\r\o\x\y ]] + containers=(pxc haproxy) + for container in '"${containers[@]}"' + check_generation 2 pxc upgrade-haproxy + local generation=2 + local container=pxc + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ve8iIonCWx +++ mktemp ++ local LAST_ERR=/tmp/tmp.VJJAvmuL5N ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ve8iIonCWx ++ cat /tmp/tmp.VJJAvmuL5N ++ rm /tmp/tmp.Ve8iIonCWx /tmp/tmp.VJJAvmuL5N ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + for container in '"${containers[@]}"' + check_generation 2 haproxy upgrade-haproxy + local generation=2 + local container=haproxy + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Lho8Z0X1Hn +++ mktemp ++ local LAST_ERR=/tmp/tmp.OPHRatKfQg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Lho8Z0X1Hn ++ cat /tmp/tmp.OPHRatKfQg ++ rm /tmp/tmp.Lho8Z0X1Hn /tmp/tmp.OPHRatKfQg ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_kubectl statefulset/upgrade-haproxy-pxc + local resource=statefulset/upgrade-haproxy-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc.yml + local new_result=/tmp/tmp.z4cN68TT63/statefulset_upgrade-haproxy-pxc.yml + desc 'compare statefulset/upgrade-haproxy-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/upgrade-haproxy-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-eks.yml ']' + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-k121.yml ']' + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc-aks.yml ']' + kubectl_bin get -o yaml statefulset/upgrade-haproxy-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-haproxy-25431", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.jZBgop9CpY ++ mktemp + local LAST_ERR=/tmp/tmp.dYd9At4Pqv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/upgrade-haproxy-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jZBgop9CpY + cat /tmp/tmp.dYd9At4Pqv + rm /tmp/tmp.jZBgop9CpY /tmp/tmp.dYd9At4Pqv + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-pxc.yml /tmp/tmp.z4cN68TT63/statefulset_upgrade-haproxy-pxc.yml + compare_kubectl statefulset/upgrade-haproxy-haproxy + local resource=statefulset/upgrade-haproxy-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy.yml + local new_result=/tmp/tmp.z4cN68TT63/statefulset_upgrade-haproxy-haproxy.yml + desc 'compare statefulset/upgrade-haproxy-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/upgrade-haproxy-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-eks.yml ']' + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.0 ]] + [[ percona/percona-xtradb-cluster:5.7.44-31.65 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater 
than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-k127.yml ']' + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy-aks.yml ']' + kubectl_bin get -o yaml statefulset/upgrade-haproxy-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. 
| select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("upgrade-haproxy-25431", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.PCfI0VbQRK ++ mktemp + local LAST_ERR=/tmp/tmp.TK1Jk84gqX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/upgrade-haproxy-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PCfI0VbQRK + cat /tmp/tmp.TK1Jk84gqX + rm /tmp/tmp.PCfI0VbQRK /tmp/tmp.TK1Jk84gqX + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy.yml /tmp/tmp.z4cN68TT63/statefulset_upgrade-haproxy-haproxy.yml --- /mnt/jenkins/workspace/cloud-pxc-operator_PR-2207/e2e-tests/upgrade-haproxy/compare/statefulset_upgrade-haproxy-haproxy.yml 2025-10-10 08:19:22.405109010 +0000 +++ /tmp/tmp.z4cN68TT63/statefulset_upgrade-haproxy-haproxy.yml 2025-10-10 10:42:13.559300020 +0000 @@ -124,8 +124,6 @@ env: - name: PXC_SERVICE value: upgrade-haproxy-pxc - - name: HA_SERVER_OPTIONS - value: resolvers kubernetes check inter 10000 rise 1 fall 2 weight 1 - name: REPLICAS_SVC_ONLY_READERS value: "false" envFrom: