Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/logs/upgrade-haproxy-8-0.log WARNING: version difference between client (1.32) and server (1.28) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.32) and server (1.28) exceeds the supported minor version skew of +/-1 + CLUSTER=upgrade-haproxy + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.17.0 + TARGET_IMAGE=perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_PROXY=perconalab/percona-xtradb-cluster-operator:main-proxysql + TARGET_IMAGE_HAPROXY=perconalab/percona-xtradb-cluster-operator:main-haproxy + TARGET_IMAGE_BACKUP=perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=8.0 ++ curl -s https://check.percona.com/versions/v1/pxc-operator ++ jq -r '.versions[].operator' ++ sort -V ++ tail -n1 + INIT_OPERATOR_VER=1.16.1 + [[ 1.16.1 == \1\.\1\7\.\0 ]] + GIT_TAG=v1.16.1 ++ curl -s 'https://check.percona.com/versions/v1/pxc-operator/1.16.1/latest?databaseVersion=8.0' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + OPERATOR_NAME=percona-xtradb-cluster-operator ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-xtradb-cluster-operator:1.16.1 ++ echo perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-xtradb-cluster-operator:1.16.1 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.pxc[].imagePath' + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.39-30.1 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM_CLIENT=percona/pmm-client:2.44.0 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.proxysql[].imagePath' + IMAGE_PROXY=percona/proxysql2:2.7.1 ++ jq -r '.versions[].matrix.haproxy[].imagePath' ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + IMAGE_HAPROXY=percona/haproxy:2.8.11 ++ jq -r '.versions[].matrix.backup[].imagePath' ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.16.1","matrix":{"mongod":{},"pxc":{"8.0.39-30.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.39-30.1","imageHash":"6a53a6ad4e7d2c2fb404d274d993414a22cb67beecf7228df9d5d994e7a09966","imageHashArm64":"","status":"recommended","critical":false}},"pmm":{"2.44.0":{"imagePath":"percona/pmm-client:2.44.0","imageHash":"0737f73449263a14d7000fbe7cd88dfd589dfed975cbb16bd29eee06a5dbd49e","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{"2.7.1":{"imagePath":"percona/proxysql2:2.7.1","imageHash":"b1c5cd48b218d19386724fa823d20a8454b2de87f4ab445903e8daeb3b6b015b","imageHashArm64":"","status":"recommended","critical":false}},"haproxy":{"2.8.11":{"imagePath":"percona/haproxy:2.8.11","imageHash":"422a210b4170a973f8582ef3d7ddcc879c32bc48f6c66fad8b3154bce4e79b84","imageHashArm64":"","status":"recommended","critical":false}},"backup":{"8.0.35":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35","imageHash":"55281c818a78162cac0c87257915d74f321a4663f3f60457da2566c64610bf49","imageHashArm64":"","status":"recommended","critical":false}},"operator":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1","imageHash":"43fd8ced51adad59394d69e3a1fc8be897668d789049be8f5dd9984672223e72","imageHashArm64":"dcba85dd7a8a164498362c2d3a73af3411e894dd6c01e600a92ddd3fb12122de","status":"recommended","critical":false}},"logCollector":{"1.16.1":{"imagePath":"percona/percona-xtradb-cluster-operator:1.16.1-logcollector-fluentbit3.2.2","imageHash":"122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + IMAGE_BACKUP=percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35 + [[ 1.17.0 == \1\.\1\6\.\1 ]] + main + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.7QMm5LUuND ++ mktemp + local LAST_ERR=/tmp/tmp.eaUpUgn1qx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7QMm5LUuND namespace/cert-manager created + cat /tmp/tmp.eaUpUgn1qx + rm /tmp/tmp.7QMm5LUuND /tmp/tmp.eaUpUgn1qx + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.EmybXizImo ++ mktemp + local LAST_ERR=/tmp/tmp.pUcsMVtrF0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EmybXizImo namespace/cert-manager labeled + cat /tmp/tmp.pUcsMVtrF0 + rm /tmp/tmp.EmybXizImo /tmp/tmp.pUcsMVtrF0 + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.16.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.j3P68O28Am ++ mktemp + local 
+ main + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.7QMm5LUuND ++ mktemp + local LAST_ERR=/tmp/tmp.eaUpUgn1qx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7QMm5LUuND namespace/cert-manager created + cat /tmp/tmp.eaUpUgn1qx + rm /tmp/tmp.7QMm5LUuND /tmp/tmp.eaUpUgn1qx + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.EmybXizImo ++ mktemp + local LAST_ERR=/tmp/tmp.pUcsMVtrF0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EmybXizImo namespace/cert-manager labeled + cat /tmp/tmp.pUcsMVtrF0 + rm /tmp/tmp.EmybXizImo /tmp/tmp.pUcsMVtrF0 + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.16.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.j3P68O28Am ++ mktemp + local LAST_ERR=/tmp/tmp.wkf8Kj5WJH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.16.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.j3P68O28Am namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.wkf8Kj5WJH Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.j3P68O28Am /tmp/tmp.wkf8Kj5WJH + return 0 + '[' '' == 4.10 ']' + sleep 70 + create_infra_gh upgrade-haproxy-27703 v1.16.1 + local ns=upgrade-haproxy-27703 + local git_tag=v1.16.1 + '[' -n pxc-operator ']' + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- ++ mktemp + kubectl_bin delete namespace pxc-operator + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.XuuHP97OZU ++ mktemp + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.HqfJEm2yWZ ++ mktemp + local LAST_ERR=/tmp/tmp.wlvG3wwUvA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + local 
LAST_ERR=/tmp/tmp.tpTe6oPIeV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XuuHP97OZU + cat /tmp/tmp.tpTe6oPIeV + rm /tmp/tmp.XuuHP97OZU /tmp/tmp.tpTe6oPIeV + return 0 namespace "cert-manager" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "upgrade-haproxy-29283" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.HqfJEm2yWZ namespace "pxc-operator" deleted + cat /tmp/tmp.wlvG3wwUvA + rm /tmp/tmp.HqfJEm2yWZ /tmp/tmp.wlvG3wwUvA + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.XKUcivTyba ++ mktemp + local LAST_ERR=/tmp/tmp.XL8WaZeDjE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XKUcivTyba namespace/pxc-operator created + cat /tmp/tmp.XL8WaZeDjE + rm /tmp/tmp.XKUcivTyba /tmp/tmp.XL8WaZeDjE + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.rswR0gDSIa +++ mktemp ++ local LAST_ERR=/tmp/tmp.bTfRkhbgpJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rswR0gDSIa ++ cat /tmp/tmp.bTfRkhbgpJ ++ rm /tmp/tmp.rswR0gDSIa /tmp/tmp.bTfRkhbgpJ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.OkPct4aUeK ++ mktemp + local LAST_ERR=/tmp/tmp.HmcCSTLssD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OkPct4aUeK Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8" modified. 
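
Every kubectl_bin call in this log follows the same capture-and-retry pattern: two mktemp files, up to three attempts (`seq 0 2`), and the captured output replayed with cat. A minimal reconstruction is sketched below; the real helper lives in the e2e-tests function library, and the redirections are inferred, since `set -x` does not print them.

    # Approximate reconstruction of the harness's kubectl_bin wrapper;
    # treat this as illustrative, not the actual source.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                  # up to 3 attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ $exit_status != 0 ]; then
                sleep 0                          # trace shows no real backoff
                continue
            fi
            break
        done
        cat "$LAST_OUT"                          # replay captured stdout
        cat "$LAST_ERR" >&2                      # replay captured stderr (inferred)
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }
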
+ cat /tmp/tmp.HmcCSTLssD + rm /tmp/tmp.OkPct4aUeK /tmp/tmp.HmcCSTLssD + return 0 + deploy_operator_gh v1.16.1 + local git_tag=v1.16.1 + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- ++ kubectl_bin get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KeZUjOnDrW +++ mktemp ++ local LAST_ERR=/tmp/tmp.h0KXhLabla ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KeZUjOnDrW ++ cat /tmp/tmp.h0KXhLabla ++ rm /tmp/tmp.KeZUjOnDrW /tmp/tmp.h0KXhLabla ++ return 0 + [[ -n perconaxtradbclusters.pxc.percona.com ]] ++ kubectl_bin get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-16-1")].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9QzOJwaOv2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4YpZ3WlZNc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaxtradbclusters.pxc.percona.com -o 'jsonpath={.spec.versions[?(@.name == "v1-16-1")].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9QzOJwaOv2 ++ cat /tmp/tmp.4YpZ3WlZNc ++ rm /tmp/tmp.9QzOJwaOv2 /tmp/tmp.4YpZ3WlZNc ++ return 0 + [[ -n '' ]] + kubectl_bin apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.16.1/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jsXmmn7mny ++ mktemp + local LAST_ERR=/tmp/tmp.ejznTSg3q7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.16.1/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jsXmmn7mny + cat /tmp/tmp.ejznTSg3q7 + rm /tmp/tmp.jsXmmn7mny /tmp/tmp.ejznTSg3q7 + return 0 + local rbac_yaml=rbac + local operator_yaml=operator.yaml + '[' -n pxc-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator.yaml + apply_rbac_gh cw-rbac v1.16.1 + local operator_namespace=pxc-operator + local rbac=cw-rbac + local git_tag=v1.16.1 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.16.1/deploy/cw-rbac.yaml + /usr/bin/sed -i -e 's^namespace: .*^namespace: pxc-operator^' /tmp/tmp.JMSKJvP72V/rbac_v1.16.1.yaml + kubectl_bin apply -f /tmp/tmp.JMSKJvP72V/rbac_v1.16.1.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vpnhZ8TtM2 ++ mktemp + local LAST_ERR=/tmp/tmp.SvEiyNbPBh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.JMSKJvP72V/rbac_v1.16.1.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vpnhZ8TtM2 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.SvEiyNbPBh + rm /tmp/tmp.vpnhZ8TtM2 /tmp/tmp.SvEiyNbPBh + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.16.1/deploy/cw-operator.yaml + cat /tmp/tmp.JMSKJvP72V/cw-operator.yaml_v1.16.1.yaml + sed -e 's^image: .*^image: 
perconalab/percona-xtradb-cluster-operator:1.16.1^' + kubectl_bin apply -n pxc-operator -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' ++ mktemp + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' + local LAST_OUT=/tmp/tmp.VBJEdNwKIm ++ mktemp + local LAST_ERR=/tmp/tmp.KSpLJMrCRM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n pxc-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VBJEdNwKIm deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.KSpLJMrCRM + rm /tmp/tmp.VBJEdNwKIm /tmp/tmp.KSpLJMrCRM + return 0 + sleep 2 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.bBPbJEgDDN +++ mktemp ++ local LAST_ERR=/tmp/tmp.u0UyqjAUVR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bBPbJEgDDN ++ cat /tmp/tmp.u0UyqjAUVR ++ rm /tmp/tmp.bBPbJEgDDN /tmp/tmp.u0UyqjAUVR ++ return 0 + wait_pod percona-xtradb-cluster-operator-8cb9886bf-d9pnd + local pod=percona-xtradb-cluster-operator-8cb9886bf-d9pnd + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-8cb9886bf-d9pnd ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-8cb9886bf-d9pnd condition met waiting for pod/percona-xtradb-cluster-operator-8cb9886bf-d9pnd to become Ready.Ok + create_namespace upgrade-haproxy-27703 + local namespace=upgrade-haproxy-27703 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding 
error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-haproxy-27703' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-haproxy-27703 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-haproxy-27703 ++ mktemp + awk '{print$1}' + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + local LAST_OUT=/tmp/tmp.AOM0bzmQUQ + xargs kubectl delete ns ++ mktemp + local LAST_ERR=/tmp/tmp.Jl0c2gMIFW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-27703 + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.SZ06pGIuMD ++ mktemp + local LAST_ERR=/tmp/tmp.M4rJDRXwdc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-27703 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SZ06pGIuMD + cat /tmp/tmp.M4rJDRXwdc + rm /tmp/tmp.SZ06pGIuMD /tmp/tmp.M4rJDRXwdc + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-haproxy-27703 namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.AOM0bzmQUQ + cat /tmp/tmp.Jl0c2gMIFW Error from server (NotFound): namespaces "upgrade-haproxy-27703" not found + rm /tmp/tmp.AOM0bzmQUQ /tmp/tmp.Jl0c2gMIFW + return 1 + : + wait_for_delete namespace/upgrade-haproxy-27703 + local res=namespace/upgrade-haproxy-27703 + echo -n 'waiting for namespace/upgrade-haproxy-27703 to be deleted' waiting for namespace/upgrade-haproxy-27703 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "upgrade-haproxy-27703" not found + desc 'create namespace upgrade-haproxy-27703' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-haproxy-27703 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-haproxy-27703 ++ mktemp + local LAST_OUT=/tmp/tmp.flAZAUtRP6 ++ mktemp + local LAST_ERR=/tmp/tmp.CJilwI68NH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-haproxy-27703 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.flAZAUtRP6 namespace/upgrade-haproxy-27703 created + cat /tmp/tmp.CJilwI68NH + rm /tmp/tmp.flAZAUtRP6 /tmp/tmp.CJilwI68NH + return 0
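
The namespace churn above (delete every non-system namespace, wait for the target to disappear, recreate it) is the standard create_namespace dance in these e2e tests. Distilled into a sketch with the same exclusion filter as the log; the helper name and the --no-headers/--ignore-not-found/-r flags are mine, and the Forbidden error for "default" is expected and ignored.

    # Sketch of the namespace recycling seen above.
    recycle_namespace() {
        local ns="$1"
        # Delete all non-system namespaces; "default" will be refused by the API.
        kubectl get ns --no-headers \
            | egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift' \
            | awk '{print $1}' \
            | xargs -r kubectl delete ns || true
        kubectl delete namespace "$ns" --ignore-not-found
        kubectl wait --for=delete "namespace/$ns" --timeout=120s || true
        kubectl create namespace "$ns"
    }

    recycle_namespace upgrade-haproxy-27703
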
++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.oVD9KXDMQn +++ mktemp ++ local LAST_ERR=/tmp/tmp.IleNcz7VC4 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oVD9KXDMQn ++ cat /tmp/tmp.IleNcz7VC4 ++ rm /tmp/tmp.oVD9KXDMQn /tmp/tmp.IleNcz7VC4 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=upgrade-haproxy-27703 ++ mktemp + local LAST_OUT=/tmp/tmp.GuipBkBik2 ++ mktemp + local LAST_ERR=/tmp/tmp.KmUc39uREE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=upgrade-haproxy-27703 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GuipBkBik2 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8" modified. + cat /tmp/tmp.KmUc39uREE + rm /tmp/tmp.GuipBkBik2 /tmp/tmp.KmUc39uREE + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.YBtgsxTSyS ++ mktemp + local LAST_ERR=/tmp/tmp.DEvqwEbUg5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YBtgsxTSyS secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.DEvqwEbUg5 + rm /tmp/tmp.YBtgsxTSyS /tmp/tmp.DEvqwEbUg5 + return 0 + start_minio + deploy_helm upgrade-haproxy-27703 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete.
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Thu Jan 23 00:04:31 2025 NAMESPACE: upgrade-haproxy-27703 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-haproxy-27703.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-haproxy-27703 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-haproxy-27703 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-haproxy-27703 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-haproxy-27703 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7vkhN535yM +++ mktemp ++ local LAST_ERR=/tmp/tmp.XniNf6u2gu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7vkhN535yM ++ cat /tmp/tmp.XniNf6u2gu ++ rm /tmp/tmp.7vkhN535yM /tmp/tmp.XniNf6u2gu ++ return 0 + MINIO_POD=minio-service-776785695-pp5dw + wait_pod minio-service-776785695-pp5dw + local pod=minio-service-776785695-pp5dw + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo minio-service-776785695-pp5dw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/minio-service-776785695-pp5dw condition met waiting for pod/minio-service-776785695-pp5dw to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.eKCbLQmeAZ ++ mktemp + local LAST_ERR=/tmp/tmp.wc71ph4yPC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eKCbLQmeAZ make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.wc71ph4yPC + rm /tmp/tmp.eKCbLQmeAZ /tmp/tmp.wc71ph4yPC + return 0 + local proxy=haproxy + local cr_yaml=/tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml + prepare_cr_yaml /tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml haproxy upgrade-haproxy 3 v1.16.1 + local cr_yaml=/tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml + local proxy=haproxy + local cluster=upgrade-haproxy + local cluster_size=3 + local git_tag=v1.16.1 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.16.1/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-haproxy" | .spec.secretsName = "my-cluster-secrets" | .spec.vaultSecretName = "some-name-vault" | .spec.sslSecretName = "some-name-ssl" | .spec.sslInternalSecretName = "some-name-ssl-internal" | .spec.upgradeOptions.apply = "disabled" | .spec.pxc.size = 3 | .spec.proxysql.size = 3 | .spec.haproxy.size = 3 | .spec.pxc.image = "-pxc" | .spec.proxysql.image = "-proxysql" | .spec.haproxy.image = "-haproxy" | .spec.backup.image = "-backup" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service.#namespace:9000/" | .spec.backup.storages.minio.type = "s3" | .spec.pmm.image = "-pmm" ' - + [[ haproxy == \h\a\p\r\o\x\y ]] + yq -i eval ' .spec.haproxy.enabled = true | .spec.proxysql.enabled = false ' /tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml + spinup_pxc upgrade-haproxy /tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml 3 30 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/secrets_without_tls.yml + local cluster=upgrade-haproxy + 
local config=/tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml + local size=3 + local sleep=30 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.7uGuLsFnk5 ++ mktemp + local LAST_ERR=/tmp/tmp.pR5Sq3P4s2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7uGuLsFnk5 secret/my-cluster-secrets created + cat /tmp/tmp.pR5Sq3P4s2 + rm /tmp/tmp.7uGuLsFnk5 /tmp/tmp.pR5Sq3P4s2 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.39-30.1#' + local LAST_OUT=/tmp/tmp.tz5zsXaKWO + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.16.1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.39-30.1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:2.44.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-haproxy-27703~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.1#' + local LAST_ERR=/tmp/tmp.MQaTQiBuCK + local exit_status=0 + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.11#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tz5zsXaKWO deployment.apps/pxc-client created + cat /tmp/tmp.MQaTQiBuCK + rm /tmp/tmp.tz5zsXaKWO /tmp/tmp.MQaTQiBuCK + return 0 + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + apply_config /tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.AeS28pB0l5 + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/proxysql2:2.7.1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.39-30.1#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/haproxy:2.8.11#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 
's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:1.16.1#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-haproxy-27703~ + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:2.44.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.39-30.1#' + local LAST_ERR=/tmp/tmp.uZPFxPmiTy + local exit_status=0 + cat /tmp/tmp.JMSKJvP72V/cr_1.16.1_haproxy.yaml ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AeS28pB0l5 perconaxtradbcluster.pxc.percona.com/upgrade-haproxy created + cat /tmp/tmp.uZPFxPmiTy + rm /tmp/tmp.AeS28pB0l5 /tmp/tmp.uZPFxPmiTy + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy upgrade-haproxy ++ local target_cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.f41r8zZWFq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2roNIwvDHH +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.f41r8zZWFq +++ cat /tmp/tmp.2roNIwvDHH +++ rm /tmp/tmp.f41r8zZWFq /tmp/tmp.2roNIwvDHH +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo upgrade-haproxy-haproxy ++ return + local proxy=upgrade-haproxy-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-27703 ++ mktemp + local LAST_OUT=/tmp/tmp.PczpBqPK5v ++ mktemp + local LAST_ERR=/tmp/tmp.60D1Dy9lw4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-27703 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-27703 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n upgrade-haproxy-27703 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.PczpBqPK5v + cat /tmp/tmp.60D1Dy9lw4 error: no matching resources found + rm /tmp/tmp.PczpBqPK5v /tmp/tmp.60D1Dy9lw4 + return 1 + true + wait_for_running upgrade-haproxy-haproxy 1 + local name=upgrade-haproxy-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod 
upgrade-haproxy-haproxy-0 480 + local pod=upgrade-haproxy-haproxy-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/upgrade-haproxy-haproxy-0 condition met waiting for pod/upgrade-haproxy-haproxy-0 to become ReadyDefaulted container "haproxy" out of: haproxy, pxc-monit, pxc-init (init), haproxy-init (init) .Ok + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok + sleep 30 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h upgrade-haproxy-haproxy -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h upgrade-haproxy-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fWmw3nz7Mb +++ mktemp ++ local LAST_ERR=/tmp/tmp.FpPOWCmwtm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.fWmw3nz7Mb ++ cat /tmp/tmp.FpPOWCmwtm ++ rm /tmp/tmp.fWmw3nz7Mb /tmp/tmp.FpPOWCmwtm ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become 
Ready.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h upgrade-haproxy-haproxy -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h upgrade-haproxy-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zRozCmoNv4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.c2nioWZpWM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zRozCmoNv4 ++ cat /tmp/tmp.c2nioWZpWM ++ rm /tmp/tmp.zRozCmoNv4 /tmp/tmp.c2nioWZpWM ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ioHjnvjCky +++ mktemp ++ local LAST_ERR=/tmp/tmp.cf8n31bAtb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ioHjnvjCky ++ cat /tmp/tmp.cf8n31bAtb ++ rm /tmp/tmp.ioHjnvjCky /tmp/tmp.cf8n31bAtb ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.JMSKJvP72V/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.JMSKJvP72V/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BRvhEyVOMl +++ mktemp ++ local LAST_ERR=/tmp/tmp.BSvlPcGaMI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BRvhEyVOMl ++ cat /tmp/tmp.BSvlPcGaMI ++ rm /tmp/tmp.BRvhEyVOMl /tmp/tmp.BSvlPcGaMI ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.JMSKJvP72V/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.JMSKJvP72V/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XWO39tRQA9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NoWeKizUOU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XWO39tRQA9 ++ cat /tmp/tmp.NoWeKizUOU ++ rm /tmp/tmp.XWO39tRQA9 /tmp/tmp.NoWeKizUOU ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.JMSKJvP72V/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.JMSKJvP72V/select-1.sql ++ is_keyring_plugin_in_use upgrade-haproxy ++ local cluster=upgrade-haproxy ++ kubectl_bin exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dnS9dkiano +++ mktemp ++ local LAST_ERR=/tmp/tmp.Chq9recmty ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it upgrade-haproxy-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dnS9dkiano ++ cat /tmp/tmp.Chq9recmty Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.dnS9dkiano /tmp/tmp.Chq9recmty ++ return 0 + '[' '' ']' + compare_generation 1 haproxy upgrade-haproxy + local generation=1 + local proxy=haproxy + local cluster=upgrade-haproxy + local current_generation + [[ haproxy == \h\a\p\r\o\x\y ]] + containers=(pxc haproxy) + for container in '"${containers[@]}"' + check_generation 1 pxc upgrade-haproxy + local generation=1 + local container=pxc + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4HewfjUiUZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.eC3pMUsKKS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4HewfjUiUZ ++ cat /tmp/tmp.eC3pMUsKKS ++ rm /tmp/tmp.4HewfjUiUZ /tmp/tmp.eC3pMUsKKS ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' + check_generation 1 haproxy upgrade-haproxy + local generation=1 + local container=haproxy + local cluster=upgrade-haproxy + local current_generation ++ kubectl_bin get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cH0aQyQ8jl +++ mktemp ++ local LAST_ERR=/tmp/tmp.djB3gfOfzb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-haproxy-haproxy -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.cH0aQyQ8jl ++ cat /tmp/tmp.djB3gfOfzb ++ rm /tmp/tmp.cH0aQyQ8jl /tmp/tmp.djB3gfOfzb ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + run_backup upgrade-haproxy on-demand-backup-minio + local cluster=upgrade-haproxy + local backup1=on-demand-backup-minio + desc 'make backup on-demand-backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- make backup on-demand-backup-minio ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/conf/on-demand-backup-minio.yml ++ mktemp + local LAST_OUT=/tmp/tmp.tX2sjl3QJX ++ mktemp + local LAST_ERR=/tmp/tmp.Xhr0YQErKv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/conf/on-demand-backup-minio.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tX2sjl3QJX perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-minio created + cat 
/tmp/tmp.Xhr0YQErKv + rm /tmp/tmp.tX2sjl3QJX /tmp/tmp.Xhr0YQErKv + return 0 + wait_backup on-demand-backup-minio + local backup=on-demand-backup-minio + local status=Succeeded + set +o xtrace on-demand-backup-minio...............Succeeded + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.FdfvC6fy8H ++ mktemp + local LAST_ERR=/tmp/tmp.whP6khiSWB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FdfvC6fy8H customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.whP6khiSWB + rm /tmp/tmp.FdfvC6fy8H /tmp/tmp.whP6khiSWB + return 0 + [[ -n pxc-operator ]] + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JIzZQCQnU2 ++ mktemp + local LAST_ERR=/tmp/tmp.B5Unl2zWl5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JIzZQCQnU2 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.B5Unl2zWl5 + rm /tmp/tmp.JIzZQCQnU2 /tmp/tmp.B5Unl2zWl5 + return 0 + kubectl_bin patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e"}]}}}}' -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.NuJpHPKEvI ++ mktemp + local LAST_ERR=/tmp/tmp.rZENhW2YPY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e"}]}}}}' -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NuJpHPKEvI deployment.apps/percona-xtradb-cluster-operator patched + cat /tmp/tmp.rZENhW2YPY + rm /tmp/tmp.NuJpHPKEvI /tmp/tmp.rZENhW2YPY + return 0 + kubectl_bin rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ghyFYAlwFq ++ mktemp + local LAST_ERR=/tmp/tmp.1qWf2VmXH8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ghyFYAlwFq Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 of 1 updated replicas 
are available... deployment "percona-xtradb-cluster-operator" successfully rolled out + cat /tmp/tmp.1qWf2VmXH8 + rm /tmp/tmp.ghyFYAlwFq /tmp/tmp.1qWf2VmXH8 + return 0 + sleep 10 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- + local i=0 + local max=60 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gEUBzx3KSo +++ mktemp ++ local LAST_ERR=/tmp/tmp.rBcGHdis3H ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gEUBzx3KSo ++ cat /tmp/tmp.rBcGHdis3H ++ rm /tmp/tmp.gEUBzx3KSo /tmp/tmp.rBcGHdis3H ++ return 0 + [[ 1 -eq 1 ]] + '[' -n pxc-operator ']' ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.PX00NTshm3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RHVYErYYeU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PX00NTshm3 ++ cat /tmp/tmp.RHVYErYYeU ++ rm /tmp/tmp.PX00NTshm3 /tmp/tmp.RHVYErYYeU ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.wL3lPSoySQ ++ mktemp + local LAST_ERR=/tmp/tmp.Nk52In9p5c + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wL3lPSoySQ Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8" modified. 
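Every kubectl call in this log runs through the suite's kubectl_bin wrapper, which is why the same mktemp / LAST_OUT / LAST_ERR / 'seq 0 2' boilerplate repeats around each command. A minimal sketch of that wrapper, reconstructed from the trace alone (the real helper lives in the e2e-tests function library and may differ in details; only the retry count, the 'sleep 0', and the final cat/rm/return are directly observable here):

kubectl_bin() {
    local LAST_OUT=$(mktemp)
    local LAST_ERR=$(mktemp)
    local exit_status=0
    for i in $(seq 0 2); do                  # up to three attempts per command
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ $exit_status != 0 ] || break       # success: stop retrying
        sleep 0                              # no real backoff is visible in the trace
    done
    cat "$LAST_OUT"                          # replay captured stdout into the log ...
    cat "$LAST_ERR"                          # ... followed by captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

This also explains the earlier 'kubectl wait ... -l app.kubernetes.io/instance=monitoring' block: all three attempts fail with 'error: no matching resources found' (apparently no PMM pods exist in this run), the wrapper returns 1, and the test deliberately swallows the failure with '+ true' before moving on.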
+ cat /tmp/tmp.Nk52In9p5c + rm /tmp/tmp.wL3lPSoySQ /tmp/tmp.Nk52In9p5c + return 0 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7ag8MDoiiL +++ mktemp ++ local LAST_ERR=/tmp/tmp.yr0NOxXCow ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7ag8MDoiiL ++ cat /tmp/tmp.yr0NOxXCow ++ rm /tmp/tmp.7ag8MDoiiL /tmp/tmp.yr0NOxXCow ++ return 0 + wait_pod percona-xtradb-cluster-operator-58c6bb8d8-2sfps + local pod=percona-xtradb-cluster-operator-58c6bb8d8-2sfps + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-58c6bb8d8-2sfps ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-58c6bb8d8-2sfps condition met waiting for pod/percona-xtradb-cluster-operator-58c6bb8d8-2sfps to become Ready.Ok ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.KVAP3gc27J +++ mktemp ++ local LAST_ERR=/tmp/tmp.P5xy9m1w4a ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KVAP3gc27J ++ cat /tmp/tmp.P5xy9m1w4a ++ rm /tmp/tmp.KVAP3gc27J /tmp/tmp.P5xy9m1w4a ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=upgrade-haproxy-27703 ++ mktemp + local LAST_OUT=/tmp/tmp.I7gWKyLHUN ++ mktemp + local LAST_ERR=/tmp/tmp.s2THvjYcS3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8 --namespace=upgrade-haproxy-27703 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.I7gWKyLHUN Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1949-00b60e7e-1-cluster8" modified. 
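With the wrapper noise stripped away, the 'upgrade operator' step that just completed reduces to four kubectl calls plus two context switches. A condensed sketch for readability, with paths and the image tag copied verbatim from the trace (assume the working directory is the repo checkout; this is not the test source itself):

# 1. Move the CRDs to the target version. Server-side apply with --force-conflicts
#    is the usual choice for CRDs this large, since client-side apply would try to
#    store the whole spec in the last-applied-configuration annotation.
kubectl apply --server-side --force-conflicts -f deploy/crd.yaml

# 2. Re-apply the cluster-wide RBAC, pinning the namespace to pxc-operator.
sed -e 's^namespace: .*^namespace: pxc-operator^' deploy/cw-rbac.yaml | kubectl apply -f -

# 3. Swap the operator image to the PR build under test.
kubectl -n pxc-operator patch deployment percona-xtradb-cluster-operator \
    -p '{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e"}]}}}}'

# 4. Block until the new replica is available; the test then verifies that exactly
#    one operator pod runs the new image before switching the kubectl context back
#    to the test namespace (upgrade-haproxy-27703).
kubectl -n pxc-operator rollout status deployment/percona-xtradb-cluster-operator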
+ cat /tmp/tmp.s2THvjYcS3 + rm /tmp/tmp.I7gWKyLHUN /tmp/tmp.s2THvjYcS3 + return 0 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-haproxy 3 + local cluster=upgrade-haproxy + local cluster_size=3 + wait_cluster_consistency upgrade-haproxy 3 + local cluster_name=upgrade-haproxy + local cluster_size=3 + local proxy_size= + '[' -z '' ']' ++ get_proxy_size upgrade-haproxy ++ local cluster=upgrade-haproxy +++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BjNHdDpr0w ++++ mktemp +++ local LAST_ERR=/tmp/tmp.a87SGjJlPF +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BjNHdDpr0w +++ cat /tmp/tmp.a87SGjJlPF +++ rm /tmp/tmp.BjNHdDpr0w /tmp/tmp.a87SGjJlPF +++ return 0 ++ [[ true == \t\r\u\e ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iyZYuYV46p +++ mktemp ++ local LAST_ERR=/tmp/tmp.eu1M39WBay ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.size}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iyZYuYV46p ++ cat /tmp/tmp.eu1M39WBay ++ rm /tmp/tmp.iyZYuYV46p /tmp/tmp.eu1M39WBay ++ return 0 ++ return + proxy_size=3 + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c2J6WavmUz +++ mktemp ++ local LAST_ERR=/tmp/tmp.4kLQfoJ1XT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.c2J6WavmUz ++ cat /tmp/tmp.4kLQfoJ1XT ++ rm /tmp/tmp.c2J6WavmUz /tmp/tmp.4kLQfoJ1XT ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zOIEm4mtCd +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZYlhAr0CCD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zOIEm4mtCd ++ cat /tmp/tmp.ZYlhAr0CCD ++ rm /tmp/tmp.zOIEm4mtCd /tmp/tmp.ZYlhAr0CCD ++ return 0 + [[ '' == \3 ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rzt2QL0OMh +++ mktemp ++ local LAST_ERR=/tmp/tmp.vRYMYkwmZR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rzt2QL0OMh ++ cat /tmp/tmp.vRYMYkwmZR ++ rm /tmp/tmp.rzt2QL0OMh /tmp/tmp.vRYMYkwmZR 
++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bDso1K4QcT +++ mktemp ++ local LAST_ERR=/tmp/tmp.FCIxlO5r0R ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bDso1K4QcT ++ cat /tmp/tmp.FCIxlO5r0R ++ rm /tmp/tmp.bDso1K4QcT /tmp/tmp.FCIxlO5r0R ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4POpm08cPZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.fMTgbbhkam ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4POpm08cPZ ++ cat /tmp/tmp.fMTgbbhkam ++ rm /tmp/tmp.4POpm08cPZ /tmp/tmp.fMTgbbhkam ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7ySfxkkdmv +++ mktemp ++ local LAST_ERR=/tmp/tmp.GtPWyofYKL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7ySfxkkdmv ++ cat /tmp/tmp.GtPWyofYKL ++ rm /tmp/tmp.7ySfxkkdmv /tmp/tmp.GtPWyofYKL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bQDqNYBV1k +++ mktemp ++ local LAST_ERR=/tmp/tmp.apG94h2vpL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bQDqNYBV1k ++ cat /tmp/tmp.apG94h2vpL ++ rm /tmp/tmp.bQDqNYBV1k /tmp/tmp.apG94h2vpL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 5 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rMurqwzQim +++ mktemp ++ local LAST_ERR=/tmp/tmp.ywymIhJhHC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.rMurqwzQim ++ cat /tmp/tmp.ywymIhJhHC ++ rm /tmp/tmp.rMurqwzQim /tmp/tmp.ywymIhJhHC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 6 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hH5BaNGYtp +++ mktemp ++ local LAST_ERR=/tmp/tmp.xnBayLNEKT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hH5BaNGYtp ++ cat /tmp/tmp.xnBayLNEKT ++ rm /tmp/tmp.hH5BaNGYtp /tmp/tmp.xnBayLNEKT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 7 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9TCk5jW26X +++ mktemp ++ local LAST_ERR=/tmp/tmp.NQfX38rmIE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9TCk5jW26X ++ cat /tmp/tmp.NQfX38rmIE ++ rm /tmp/tmp.9TCk5jW26X /tmp/tmp.NQfX38rmIE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 8 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xBFjnxlROl +++ mktemp ++ local LAST_ERR=/tmp/tmp.CTaFi3T2Fr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xBFjnxlROl ++ cat /tmp/tmp.CTaFi3T2Fr ++ rm /tmp/tmp.xBFjnxlROl /tmp/tmp.CTaFi3T2Fr ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4X2K9R7YmG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rm1YDf6ayZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4X2K9R7YmG ++ cat /tmp/tmp.Rm1YDf6ayZ ++ rm /tmp/tmp.4X2K9R7YmG /tmp/tmp.Rm1YDf6ayZ ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-haproxy +++ local cluster_name=upgrade-haproxy ++++ get_proxy upgrade-haproxy ++++ local target_cluster=upgrade-haproxy +++++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6FFEerEkTA ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.x5nAL5448z +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.6FFEerEkTA +++++ cat /tmp/tmp.x5nAL5448z +++++ rm /tmp/tmp.6FFEerEkTA /tmp/tmp.x5nAL5448z +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-haproxy-haproxy ++++ return +++ local cluster_proxy=upgrade-haproxy-haproxy +++ echo haproxy ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WE99MIp5rL +++ mktemp ++ local LAST_ERR=/tmp/tmp.lngU9MfVPg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WE99MIp5rL ++ cat /tmp/tmp.lngU9MfVPg ++ rm /tmp/tmp.WE99MIp5rL /tmp/tmp.lngU9MfVPg ++ return 0 + [[ 3 == \3 ]] + wait_for_running upgrade-haproxy-pxc 3 + local name=upgrade-haproxy-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster 
----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-0 480 + local pod=upgrade-haproxy-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-0 condition met waiting for pod/upgrade-haproxy-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-1 480 + local pod=upgrade-haproxy-pxc-1 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo upgrade-haproxy-pxc-1 + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-1 condition met waiting for pod/upgrade-haproxy-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-haproxy-pxc-2 480 + local pod=upgrade-haproxy-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-haproxy-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/upgrade-haproxy-pxc-2 condition met waiting for pod/upgrade-haproxy-pxc-2 to become Ready.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-0.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HzIDx2yYk4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UVDczwBf8l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.HzIDx2yYk4 ++ cat /tmp/tmp.UVDczwBf8l ++ rm /tmp/tmp.HzIDx2yYk4 /tmp/tmp.UVDczwBf8l ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.JMSKJvP72V/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.JMSKJvP72V/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-1.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7NZUQj7Dfj +++ mktemp ++ local LAST_ERR=/tmp/tmp.rDEtXFDY3B ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.7NZUQj7Dfj ++ cat /tmp/tmp.rDEtXFDY3B ++ rm /tmp/tmp.7NZUQj7Dfj /tmp/tmp.rDEtXFDY3B ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.JMSKJvP72V/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.JMSKJvP72V/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.4 ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1-80.sql ]] + [[ percona/percona-xtradb-cluster:8.0.39-30.1 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-haproxy-pxc-2.upgrade-haproxy-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1nO2j3EwIO +++ mktemp ++ local LAST_ERR=/tmp/tmp.t3tPNDdIFd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1nO2j3EwIO ++ cat /tmp/tmp.t3tPNDdIFd ++ rm /tmp/tmp.1nO2j3EwIO /tmp/tmp.t3tPNDdIFd ++ return 0 + client_pod=pxc-client-6b96564c8f-kxxsp + wait_pod pxc-client-6b96564c8f-kxxsp + local pod=pxc-client-6b96564c8f-kxxsp + local max_retry=480 + local ns= ++ echo pxc-client-6b96564c8f-kxxsp ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6b96564c8f-kxxsp condition met waiting for pod/pxc-client-6b96564c8f-kxxsp to become Ready.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.JMSKJvP72V/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1949/e2e-tests/upgrade-haproxy/compare/select-1.sql /tmp/tmp.JMSKJvP72V/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eaHrtt5iud +++ mktemp ++ local LAST_ERR=/tmp/tmp.aDj4gFlE1j ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.eaHrtt5iud ++ cat /tmp/tmp.aDj4gFlE1j ++ rm /tmp/tmp.eaHrtt5iud /tmp/tmp.aDj4gFlE1j ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e == perconalab/percona-xtradb-cluster-operator:PR-1949-00b60e7e ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dZTYmPdhmx +++ mktemp ++ local LAST_ERR=/tmp/tmp.l1HVKD6b0K ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dZTYmPdhmx ++ cat /tmp/tmp.l1HVKD6b0K ++ rm /tmp/tmp.dZTYmPdhmx /tmp/tmp.l1HVKD6b0K ++ return 0 + [[ percona/proxysql2:2.7.1 == percona/proxysql2:2.7.1 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Eb7FjFwG11 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VbtXM2YA8d ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Eb7FjFwG11 ++ cat /tmp/tmp.VbtXM2YA8d ++ rm /tmp/tmp.Eb7FjFwG11 /tmp/tmp.VbtXM2YA8d ++ return 0 + [[ percona/haproxy:2.8.11 == percona/haproxy:2.8.11 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zkuBpiuDrO +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wy7M9XNmeN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zkuBpiuDrO ++ cat /tmp/tmp.Wy7M9XNmeN ++ rm /tmp/tmp.zkuBpiuDrO /tmp/tmp.Wy7M9XNmeN ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35 == percona/percona-xtradb-cluster-operator:1.16.1-pxc8.0-backup-pxb8.0.35 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VgUf2NiUOE +++ mktemp ++ local LAST_ERR=/tmp/tmp.wXZUiY8j26 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc upgrade-haproxy -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VgUf2NiUOE ++ cat /tmp/tmp.wXZUiY8j26 ++ rm /tmp/tmp.VgUf2NiUOE /tmp/tmp.wXZUiY8j26 ++ return 0 + [[ percona/pmm-client:2.44.0 == percona/pmm-client:2.44.0 ]] ++ kubectl_bin get pxc upgrade-haproxy -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0NK11gvctG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ht0peBZvt0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc 
upgrade-haproxy -o 'jsonpath={.spec.pxc.image}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.0NK11gvctG
++ cat /tmp/tmp.Ht0peBZvt0
++ rm /tmp/tmp.0NK11gvctG /tmp/tmp.Ht0peBZvt0
++ return 0
+ [[ percona/percona-xtradb-cluster:8.0.39-30.1 == percona/percona-xtradb-cluster:8.0.39-30.1 ]]
+ : Operator image has been updated correctly
+ compare_generation 1 haproxy upgrade-haproxy
+ local generation=1
+ local proxy=haproxy
+ local cluster=upgrade-haproxy
+ local current_generation
+ [[ haproxy == \h\a\p\r\o\x\y ]]
+ containers=(pxc haproxy)
+ for container in '"${containers[@]}"'
+ check_generation 1 pxc upgrade-haproxy
+ local generation=1
+ local container=pxc
+ local cluster=upgrade-haproxy
+ local current_generation
++ kubectl_bin get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.OW3rjpt8C0
+++ mktemp
++ local LAST_ERR=/tmp/tmp.enpRxx5Iqu
++ local exit_status=0
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get statefulset upgrade-haproxy-pxc -o 'jsonpath={.metadata.generation}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 ']'
++ break
++ cat /tmp/tmp.OW3rjpt8C0
++ cat /tmp/tmp.enpRxx5Iqu
++ rm /tmp/tmp.OW3rjpt8C0 /tmp/tmp.enpRxx5Iqu
++ return 0
+ current_generation=2
+ [[ 1 != \2 ]]
+ echo 'Generation for resource pxc is: 2, but should be: 1'
Generation for resource pxc is: 2, but should be: 1
+ exit 1
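The run fails on this final consistency check, not on data or images: every image the test compares (operator, proxysql, haproxy, backup, pmm, pxc) still matches the expected value, but the upgrade-haproxy-pxc StatefulSet reports metadata.generation 2 where the test expects 1. Kubernetes increments a StatefulSet's generation on every spec change, so generation 2 means the upgraded operator rewrote the PXC StatefulSet spec after the operator image swap; whether that rewrite is an intended change in PR-1949 or a regression is exactly what this assertion is meant to surface. Reconstructed from the trace, the failing helper is roughly (kubectl_bin as sketched earlier; the real function may differ):

check_generation() {
    local generation=$1 container=$2 cluster=$3
    local current_generation
    current_generation=$(kubectl_bin get statefulset "${cluster}-${container}" \
        -o 'jsonpath={.metadata.generation}')
    if [[ ${generation} != "${current_generation}" ]]; then
        echo "Generation for resource ${container} is: ${current_generation}, but should be: ${generation}"
        exit 1                               # aborts the whole test run, as seen above
    fi
}

compare_generation calls this once per StatefulSet in containers=(pxc haproxy); the pxc check fails first here, so the haproxy generation is never examined.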