++ echo 'Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/upgrade-proxysql.log' Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/upgrade-proxysql.log ++ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: No Auth Provider found for name "gcp" +++ grep '\-eks\-' +++ jq -r .serverVersion.gitVersion +++ kubectl version -o json ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' ++ KUBE_VERSION=1.20 +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' +++ helm version -c ++ HELM_VERSION=v3.8.1 ++ '[' v3 == v2 ']' + CLUSTER=upgrade-proxysql + CLUSTER_SIZE=3 + TARGET_API=pxc.percona.com/v1-11-0 ++ /usr/bin/sed s/-/./g ++ echo -n 1-11-0 + TARGET_OPERATOR_VER=1.11.0 + TARGET_IMAGE=perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a + TARGET_IMAGE_PXC=perconalab/percona-xtradb-cluster-operator:main-pxc8.0 + TARGET_IMAGE_PMM=perconalab/pmm-client:dev-latest + TARGET_IMAGE_PROXY=perconalab/percona-xtradb-cluster-operator:main-proxysql + TARGET_IMAGE_HAPROXY=perconalab/percona-xtradb-cluster-operator:main-haproxy + TARGET_IMAGE_BACKUP=perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == *\p\e\r\c\o\n\a\-\x\t\r\a\d\b\-\c\l\u\s\t\e\r\-\o\p\e\r\a\t\o\r* ]] ++ echo -n perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ++ /usr/bin/sed -r 's/.*([0-9].[0-9])$/\1/' + PXC_VER=8.0 ++ tail -n1 ++ sort -V ++ curl -s https://check.percona.com/versions/v1/pxc-operator ++ jq -r '.versions[].operator' + INIT_OPERATOR_VER=1.10.0 + [[ 1.10.0 == \1\.\1\1\.\0 ]] + GIT_TAG=v1.10.0 ++ curl -s 'https://check.percona.com/versions/v1/pxc-operator/1.10.0/latest?databaseVersion=8.0' + 
INIT_OPERATOR_IMAGES='{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' + OPERATOR_NAME=percona-xtradb-cluster-operator + API=pxc.percona.com/v1-10-0 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-xtradb-cluster-operator:1.10.0 ++ cut -d/ -f1 ++ echo perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-xtradb-cluster-operator:1.10.0 ++ jq -r '.versions[].matrix.pxc[].imagePath' ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' + IMAGE_PXC=percona/percona-xtradb-cluster:8.0.25-15.1 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM=percona/pmm-client:2.23.0 ++ jq -r '.versions[].matrix.proxysql[].imagePath' ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' + IMAGE_PROXY=percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25 ++ jq -r '.versions[].matrix.haproxy[].imagePath' ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' + IMAGE_HAPROXY=percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25 ++ echo 
'{"versions":[{"product":"pxc-operator","operator":"1.10.0","matrix":{"mongod":{},"pxc":{"8.0.25-15.1":{"imagePath":"percona/percona-xtradb-cluster:8.0.25-15.1","imageHash":"529e979c86442429e6feabef9a2d9fc362f4626146f208fbfac704e145a492dd","status":"recommended","critical":false}},"pmm":{"2.23.0":{"imagePath":"percona/pmm-client:2.23.0","imageHash":"8fa0e45f740fa8564cbfbdf5d9a5507a07e331f8f40ea022d3a64d7278478eac","status":"recommended","critical":false}},"proxysql":{"2.0.18-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25","imageHash":"b84701c47a11c6f5ca46481f25f1b6086c0a30014d05584c7987f1d42a17b584","status":"recommended","critical":false}},"haproxy":{"2.3.15":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25","imageHash":"62479be2a21192a3215f03d3f9541decd5ef1737741245ac33ee439915a15128","status":"recommended","critical":false}},"backup":{"8.0.25":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup","imageHash":"c3991f0959a3b4114d7ff629d9d3cdf0dc200c58443ca8ebb1446d8b1cbe416d","status":"recommended","critical":false}},"operator":{"1.10.0":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0","imageHash":"73d2266258b700a691db6196f4b5c830845d34d57bdef5be5ffbd45e88407309","status":"recommended","critical":false}},"logCollector":{"1.10.0-2":{"imagePath":"percona/percona-xtradb-cluster-operator:1.10.0-logcollector-8.0.25","imageHash":"d69dad98900532e2ad6d0bf12c34a148462816fa3ee4697e9b73efef7583901a","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{}}}]}' ++ jq -r '.versions[].matrix.backup[].imagePath' + IMAGE_BACKUP=percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup + [[ pxc.percona.com/v1-11-0 == \p\x\c\.\p\e\r\c\o\n\a\.\c\o\m\/\v\1\-\1\0\-\0 ]] + main + deploy_cert_manager + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.MwyJYaCgwH ++ mktemp + local LAST_ERR=/tmp/tmp.yDZPLM9X3w + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace cert-manager + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.MwyJYaCgwH namespace/cert-manager created + cat /tmp/tmp.yDZPLM9X3w + rm /tmp/tmp.MwyJYaCgwH /tmp/tmp.yDZPLM9X3w + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.qjaz8p6tzd ++ mktemp + local LAST_ERR=/tmp/tmp.JT8RAwVkZ1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.qjaz8p6tzd namespace/cert-manager labeled + cat /tmp/tmp.JT8RAwVkZ1 + rm /tmp/tmp.qjaz8p6tzd /tmp/tmp.JT8RAwVkZ1 + return 0 + kubectl_bin apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.5oEjpMTqRF ++ mktemp + local LAST_ERR=/tmp/tmp.swRNoJuaPY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml --validate=false + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.5oEjpMTqRF customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created 
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created namespace/cert-manager configured serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrole.rbac.authorization.k8s.io/cert-manager-view created clusterrole.rbac.authorization.k8s.io/cert-manager-edit created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created role.rbac.authorization.k8s.io/cert-manager:leaderelection created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created + cat /tmp/tmp.swRNoJuaPY Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. 
kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.5oEjpMTqRF /tmp/tmp.swRNoJuaPY + return 0 + sleep 60 + create_infra_gh upgrade-proxysql-20589 v1.10.0 + local ns=upgrade-proxysql-20589 + local git_tag=v1.10.0 + '[' -n pxc-operator ']' + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + '[' '!' -z '' ']' + kubectl_bin delete namespace pxc-operator + xargs kubectl delete ns + awk '{print$1}' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.NLALTBrPK5 + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + local LAST_OUT=/tmp/tmp.ckEXVOiwSN ++ mktemp + local LAST_ERR=/tmp/tmp.UupbzkU3GM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.4CSC2XRtch + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ckEXVOiwSN + cat /tmp/tmp.UupbzkU3GM + rm /tmp/tmp.ckEXVOiwSN /tmp/tmp.UupbzkU3GM + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator namespace "cert-manager" deleted + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.NLALTBrPK5 + cat /tmp/tmp.4CSC2XRtch Error from server (NotFound): namespaces "pxc-operator" not found + rm /tmp/tmp.NLALTBrPK5 /tmp/tmp.4CSC2XRtch + return 1 + : + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + set +o xtrace namespace/pxc-operator - Error from server (NotFound): namespaces "pxc-operator" not found + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.QZKop9esNL ++ mktemp + local LAST_ERR=/tmp/tmp.Qff5Zc0yzS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.QZKop9esNL namespace/pxc-operator created + cat /tmp/tmp.Qff5Zc0yzS + rm /tmp/tmp.QZKop9esNL /tmp/tmp.Qff5Zc0yzS + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ZrZ1VZktG +++ mktemp ++ local LAST_ERR=/tmp/tmp.jsudM0bXjX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.8ZrZ1VZktG ++ cat /tmp/tmp.jsudM0bXjX ++ rm /tmp/tmp.8ZrZ1VZktG /tmp/tmp.jsudM0bXjX ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.9Bdpl3w64B ++ mktemp + local LAST_ERR=/tmp/tmp.7jZ189Dx3L + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.9Bdpl3w64B Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade" modified. 
+ cat /tmp/tmp.7jZ189Dx3L + rm /tmp/tmp.9Bdpl3w64B /tmp/tmp.7jZ189Dx3L + return 0 + deploy_operator_gh v1.10.0 + local git_tag=v1.10.0 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- ++ kubectl_bin get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ADG79DEMPX +++ mktemp ++ local LAST_ERR=/tmp/tmp.xKm8q13ax1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get crds -o 'jsonpath={.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ADG79DEMPX ++ cat /tmp/tmp.xKm8q13ax1 ++ rm /tmp/tmp.ADG79DEMPX /tmp/tmp.xKm8q13ax1 ++ return 0 + [[ -n '' ]] + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.10.0/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.SRsHwczOS9 ++ mktemp + local LAST_ERR=/tmp/tmp.MHmnAjkjbB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.10.0/deploy/crd.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.SRsHwczOS9 + cat /tmp/tmp.MHmnAjkjbB + rm /tmp/tmp.SRsHwczOS9 /tmp/tmp.MHmnAjkjbB + return 0 + local rbac_yaml=rbac + local operator_yaml=operator.yaml + '[' -n pxc-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator.yaml + apply_rbac_gh cw-rbac v1.10.0 + local operator_namespace=pxc-operator + local rbac=cw-rbac + local git_tag=v1.10.0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.10.0/deploy/cw-rbac.yaml + /usr/bin/sed -i -e 's^namespace: .*^namespace: pxc-operator^' /tmp/tmp.4DJIKxAFYN/rbac_v1.10.0.yaml + kubectl_bin apply -f /tmp/tmp.4DJIKxAFYN/rbac_v1.10.0.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Lt9q1FG8cN ++ mktemp + local LAST_ERR=/tmp/tmp.j8A37o7LgQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /tmp/tmp.4DJIKxAFYN/rbac_v1.10.0.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.Lt9q1FG8cN clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator created serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator created + cat /tmp/tmp.j8A37o7LgQ + rm /tmp/tmp.Lt9q1FG8cN /tmp/tmp.j8A37o7LgQ + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.10.0/deploy/cw-operator.yaml + /usr/bin/sed -i -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:1.10.0^' /tmp/tmp.4DJIKxAFYN/cw-operator.yaml_v1.10.0.yaml + kubectl_bin apply -f /tmp/tmp.4DJIKxAFYN/cw-operator.yaml_v1.10.0.yaml -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3D70ZUFfEd ++ mktemp + local LAST_ERR=/tmp/tmp.mEeF1T7B6A + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /tmp/tmp.4DJIKxAFYN/cw-operator.yaml_v1.10.0.yaml -n pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.3D70ZUFfEd deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.mEeF1T7B6A + rm /tmp/tmp.3D70ZUFfEd /tmp/tmp.mEeF1T7B6A + return 0 + sleep 2 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods 
--selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.SIWOwJlalr +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ma14EZOyN9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.SIWOwJlalr ++ cat /tmp/tmp.Ma14EZOyN9 ++ rm /tmp/tmp.SIWOwJlalr /tmp/tmp.Ma14EZOyN9 ++ return 0 + wait_pod percona-xtradb-cluster-operator-6855f87d75-j4hn9 + local pod=percona-xtradb-cluster-operator-6855f87d75-j4hn9 + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-6855f87d75-j4hn9 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace percona-xtradb-cluster-operator-6855f87d75-j4hn9..Ok + create_namespace upgrade-proxysql-20589 + local namespace=upgrade-proxysql-20589 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + '[' '!' -z '' ']' + kubectl_bin delete namespace upgrade-proxysql-20589 + xargs kubectl delete ns + awk '{print$1}' ++ mktemp + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.AB7vMrBraw ++ mktemp + local LAST_ERR=/tmp/tmp.Vnf62SdiSC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete namespace upgrade-proxysql-20589 + local LAST_OUT=/tmp/tmp.v3Ko7JgEhT ++ mktemp + local LAST_ERR=/tmp/tmp.ScFCAo1BRN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.v3Ko7JgEhT + cat /tmp/tmp.ScFCAo1BRN + rm /tmp/tmp.v3Ko7JgEhT /tmp/tmp.ScFCAo1BRN + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace upgrade-proxysql-20589 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace upgrade-proxysql-20589 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.AB7vMrBraw + cat /tmp/tmp.Vnf62SdiSC Error from server (NotFound): namespaces "upgrade-proxysql-20589" not found + rm /tmp/tmp.AB7vMrBraw /tmp/tmp.Vnf62SdiSC + return 1 + : + wait_for_delete namespace/upgrade-proxysql-20589 + local res=namespace/upgrade-proxysql-20589 + set +o xtrace namespace/upgrade-proxysql-20589 - Error from server (NotFound): namespaces "upgrade-proxysql-20589" not found + kubectl_bin create namespace upgrade-proxysql-20589 ++ mktemp + local LAST_OUT=/tmp/tmp.yggWSCCW2Y ++ mktemp + local LAST_ERR=/tmp/tmp.c2LthThoy8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace upgrade-proxysql-20589 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.yggWSCCW2Y namespace/upgrade-proxysql-20589 created + cat /tmp/tmp.c2LthThoy8 + rm /tmp/tmp.yggWSCCW2Y /tmp/tmp.c2LthThoy8 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.GV1ldwmqhc +++ mktemp ++ local LAST_ERR=/tmp/tmp.lD6bB8jwmQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.GV1ldwmqhc ++ cat /tmp/tmp.lD6bB8jwmQ ++ rm 
/tmp/tmp.GV1ldwmqhc /tmp/tmp.lD6bB8jwmQ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=upgrade-proxysql-20589 ++ mktemp + local LAST_OUT=/tmp/tmp.6UjmZqOIXR ++ mktemp + local LAST_ERR=/tmp/tmp.SGZrjZbskz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=upgrade-proxysql-20589 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.6UjmZqOIXR Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade" modified. + cat /tmp/tmp.SGZrjZbskz + rm /tmp/tmp.6UjmZqOIXR /tmp/tmp.SGZrjZbskz + return 0 + apply_secrets + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ZBsLMTyqtw ++ mktemp + local LAST_ERR=/tmp/tmp.nIGObyH3uK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ZBsLMTyqtw secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.nIGObyH3uK + rm /tmp/tmp.ZBsLMTyqtw /tmp/tmp.nIGObyH3uK + return 0 + local proxy=proxysql + local cr_yaml=/tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml + prepare_cr_yaml /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml proxysql upgrade-proxysql 3 v1.10.0 + local cr_yaml=/tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml + local proxy=proxysql + local cluster=upgrade-proxysql + local cluster_size=3 + local git_tag=v1.10.0 + yq w - spec.backup.image -- -backup + yq w - spec.backup.storages.minio.s3.credentialsSecret minio-secret + yq w - spec.backup.storages.minio.s3.region us-east-1 + yq w - spec.proxysql.size 3 + yq w - spec.backup.storages.minio.s3.bucket operator-testing + yq w - spec.backup.storages.minio.s3.endpointUrl http://minio-service:9000/ + yq w - spec.backup.storages.minio.type s3 + yq w - spec.sslSecretName some-name-ssl + yq w - spec.haproxy.size 3 + yq w - spec.vaultSecretName some-name-vault + yq w - spec.pxc.image -- -pxc + yq w - spec.proxysql.image -- -proxysql + yq w - spec.haproxy.image -- -haproxy + yq w - spec.secretsName my-cluster-secrets + yq w - spec.sslInternalSecretName some-name-ssl-internal + yq w - spec.upgradeOptions.apply disabled + yq w - spec.pxc.size 3 + yq w - metadata.name upgrade-proxysql + curl -s https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/v1.10.0/deploy/cr.yaml + [[ proxysql == \h\a\p\r\o\x\y ]] + yq w -i /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml spec.haproxy.enabled false + yq w -i /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml spec.proxysql.enabled true + spinup_pxc upgrade-proxysql /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml 3 30 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml + local cluster=upgrade-proxysql + local config=/tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml + local size=3 + local sleep=30 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + desc 'create first PXC cluster' + set +o xtrace 
----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IzwwxMztj0 ++ mktemp + local LAST_ERR=/tmp/tmp.jzCjSokfyx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets_without_tls.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.IzwwxMztj0 secret/my-cluster-secrets created + cat /tmp/tmp.jzCjSokfyx + rm /tmp/tmp.IzwwxMztj0 /tmp/tmp.jzCjSokfyx + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-10-0#' + local LAST_OUT=/tmp/tmp.HzkjRk0DAN + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:2.23.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-proxysql-20589~ + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:1.10.0#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.25-15.1#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.15FthzgkZH + local exit_status=0 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.25-15.1#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.HzkjRk0DAN deployment.apps/pxc-client created + cat /tmp/tmp.15FthzgkZH + rm /tmp/tmp.HzkjRk0DAN /tmp/tmp.15FthzgkZH + return 0 + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 5\.7 ]] + apply_config /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml + '[' -z '' ']' + cat_config /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GIV3X5tYqj + cat /tmp/tmp.4DJIKxAFYN/cr_1.10.0_proxysql.yaml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-10-0#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25#' + local LAST_ERR=/tmp/tmp.jsFmDtVPje + local exit_status=0 + /usr/bin/sed -e 's#image:.*-backup$#image: percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup#' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-pmm$#image: percona/pmm-client:2.23.0#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.upgrade-proxysql-20589~ + 
/usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: percona/percona-xtradb-cluster:8.0.25-15.1#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: percona/percona-xtradb-cluster:8.0.25-15.1#' + for i in '$(seq 0 2)' + kubectl apply -f - + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:1.10.0#' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.GIV3X5tYqj perconaxtradbcluster.pxc.percona.com/upgrade-proxysql created + cat /tmp/tmp.jsFmDtVPje + rm /tmp/tmp.GIV3X5tYqj /tmp/tmp.jsFmDtVPje + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy upgrade-proxysql ++ local target_cluster=upgrade-proxysql +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xTUF3BcQgH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.z4886KE3qJ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.xTUF3BcQgH +++ cat /tmp/tmp.z4886KE3qJ +++ rm /tmp/tmp.xTUF3BcQgH /tmp/tmp.z4886KE3qJ +++ return 0 ++ [[ false == \t\r\u\e ]] +++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g2kVkeXGYv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FzKjeyh5zx +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.g2kVkeXGYv +++ cat /tmp/tmp.FzKjeyh5zx +++ rm /tmp/tmp.g2kVkeXGYv /tmp/tmp.FzKjeyh5zx +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo upgrade-proxysql-proxysql ++ return + local proxy=upgrade-proxysql-proxysql + wait_for_running upgrade-proxysql-proxysql 1 + local name=upgrade-proxysql-proxysql + let last_pod=0 + : + local max_retry=480 ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-proxysql-0 480 + local pod=upgrade-proxysql-proxysql-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-proxysql-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=proxysql + set +o xtrace upgrade-proxysql-proxysql-0..................Ok + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-0...........................Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-1..........................................Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ egrep '^(pxc|proxysql)$' ++ 
/usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-2.........................................Ok + sleep 30 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h upgrade-proxysql-proxysql -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h upgrade-proxysql-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zirWuYcdjI +++ mktemp ++ local LAST_ERR=/tmp/tmp.O0gE3ba0T9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.zirWuYcdjI ++ cat /tmp/tmp.O0gE3ba0T9 ++ rm /tmp/tmp.zirWuYcdjI /tmp/tmp.O0gE3ba0T9 ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h upgrade-proxysql-proxysql -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h upgrade-proxysql-proxysql -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WUSzfrU032 +++ mktemp ++ local LAST_ERR=/tmp/tmp.U6rqoK1p6b ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.WUSzfrU032 ++ cat /tmp/tmp.U6rqoK1p6b ++ rm /tmp/tmp.WUSzfrU032 /tmp/tmp.U6rqoK1p6b ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from 
myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zbXkbvg6DH +++ mktemp ++ local LAST_ERR=/tmp/tmp.rmGAzSYo0e ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.zbXkbvg6DH ++ cat /tmp/tmp.rmGAzSYo0e ++ rm /tmp/tmp.zbXkbvg6DH /tmp/tmp.rmGAzSYo0e ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rYaM8e9K8r +++ mktemp ++ local LAST_ERR=/tmp/tmp.NFKiAMov55 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.rYaM8e9K8r ++ cat /tmp/tmp.NFKiAMov55 ++ rm /tmp/tmp.rYaM8e9K8r /tmp/tmp.NFKiAMov55 ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yMCwK7LxtB +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xxhq18pNVL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.yMCwK7LxtB ++ cat /tmp/tmp.Xxhq18pNVL ++ rm /tmp/tmp.yMCwK7LxtB /tmp/tmp.Xxhq18pNVL ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql ++ is_keyring_plugin_in_use upgrade-proxysql ++ local cluster=upgrade-proxysql ++ egrep -o 'early-plugin-load=keyring_\w+.so' ++ kubectl_bin exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CwUHWiUpkL +++ mktemp ++ local LAST_ERR=/tmp/tmp.MfSiXi0LH7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl exec -it upgrade-proxysql-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.CwUHWiUpkL ++ cat /tmp/tmp.MfSiXi0LH7 Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.CwUHWiUpkL /tmp/tmp.MfSiXi0LH7 ++ return 0 + '[' '' ']' + compare_generation 1 proxysql upgrade-proxysql + local generation=1 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JwLbUdMtY5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GSNGUrdmrx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.JwLbUdMtY5 ++ cat /tmp/tmp.GSNGUrdmrx ++ rm /tmp/tmp.JwLbUdMtY5 /tmp/tmp.GSNGUrdmrx ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aUG1pJiRC0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DowegfxMGR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.aUG1pJiRC0 ++ cat /tmp/tmp.DowegfxMGR ++ rm /tmp/tmp.aUG1pJiRC0 /tmp/tmp.DowegfxMGR ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.yJHarocyhf ++ mktemp + local LAST_ERR=/tmp/tmp.HE3AyzF8vd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.yJHarocyhf customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com configured customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbbackups.pxc.percona.com configured + cat /tmp/tmp.HE3AyzF8vd + rm /tmp/tmp.yJHarocyhf /tmp/tmp.HE3AyzF8vd + return 0 + [[ -n pxc-operator ]] + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + local 
LAST_OUT=/tmp/tmp.1MNwDkOy5H + sed -e 's^namespace: .*^namespace: pxc-operator^' ++ mktemp + local LAST_ERR=/tmp/tmp.TvAJRcsPmW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.1MNwDkOy5H clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.TvAJRcsPmW + rm /tmp/tmp.1MNwDkOy5H /tmp/tmp.TvAJRcsPmW + return 0 + kubectl_bin patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a"}]}}}}' -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.LIUFdQ2Zwa ++ mktemp + local LAST_ERR=/tmp/tmp.Fc0gXCa6dn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl patch deployment percona-xtradb-cluster-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-xtradb-cluster-operator","image":"perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a"}]}}}}' -n pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.LIUFdQ2Zwa deployment.apps/percona-xtradb-cluster-operator patched + cat /tmp/tmp.Fc0gXCa6dn + rm /tmp/tmp.LIUFdQ2Zwa /tmp/tmp.Fc0gXCa6dn + return 0 + kubectl_bin rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.1AgglYaPbT ++ mktemp + local LAST_ERR=/tmp/tmp.S8ejaCU3lH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl rollout status deployment/percona-xtradb-cluster-operator -n pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.1AgglYaPbT Waiting for deployment "percona-xtradb-cluster-operator" rollout to finish: 0 of 1 updated replicas are available... 
deployment "percona-xtradb-cluster-operator" successfully rolled out + cat /tmp/tmp.S8ejaCU3lH + rm /tmp/tmp.1AgglYaPbT /tmp/tmp.S8ejaCU3lH + return 0 + sleep 10 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ grep -vc NAME +++ mktemp ++ awk '{print $1}' ++ local LAST_OUT=/tmp/tmp.4Nmqbc9T3E +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ec2cWFoXpR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.4Nmqbc9T3E ++ cat /tmp/tmp.Ec2cWFoXpR ++ rm /tmp/tmp.4Nmqbc9T3E /tmp/tmp.Ec2cWFoXpR ++ return 0 + [[ 1 -eq 1 ]] + '[' -n pxc-operator ']' ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.QJLvxRg5w1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1MN2W7lQim ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.QJLvxRg5w1 ++ cat /tmp/tmp.1MN2W7lQim ++ rm /tmp/tmp.QJLvxRg5w1 /tmp/tmp.1MN2W7lQim ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.p5bwybDNUw ++ mktemp + local LAST_ERR=/tmp/tmp.xXYyHheb5s + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.p5bwybDNUw Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade" modified. 
+ cat /tmp/tmp.xXYyHheb5s + rm /tmp/tmp.p5bwybDNUw /tmp/tmp.xXYyHheb5s + return 0 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' +++ mktemp ++ grep perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a ++ awk '{print $1}' ++ local LAST_OUT=/tmp/tmp.2Zdcgjga41 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RcrqTR7u2t ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.2Zdcgjga41 ++ cat /tmp/tmp.RcrqTR7u2t ++ rm /tmp/tmp.2Zdcgjga41 /tmp/tmp.RcrqTR7u2t ++ return 0 + wait_pod percona-xtradb-cluster-operator-67c489499d-hc2ll + local pod=percona-xtradb-cluster-operator-67c489499d-hc2ll + local max_retry=480 + local ns= ++ echo percona-xtradb-cluster-operator-67c489499d-hc2ll ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace percona-xtradb-cluster-operator-67c489499d-hc2ll.Ok ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.3Q2iVYIEud +++ mktemp ++ local LAST_ERR=/tmp/tmp.JawSrv0w4t ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.3Q2iVYIEud ++ cat /tmp/tmp.JawSrv0w4t ++ rm /tmp/tmp.3Q2iVYIEud /tmp/tmp.JawSrv0w4t ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=upgrade-proxysql-20589 ++ mktemp + local LAST_OUT=/tmp/tmp.kaym3OcHEy ++ mktemp + local LAST_ERR=/tmp/tmp.SvF629IbHo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade --namespace=upgrade-proxysql-20589 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.kaym3OcHEy Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-upgrade" modified. 
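Every kubectl call in this log goes through the test suite's kubectl_bin wrapper, which is what produces the recurring mktemp / seq 0 2 / cat / rm pattern in the trace. Reconstructed from the trace alone (the actual helper in the e2e-tests functions file may differ in details such as the back-off), it behaves roughly like:

  kubectl_bin() {
      local LAST_OUT=$(mktemp)
      local LAST_ERR=$(mktemp)
      local exit_status=0
      for i in $(seq 0 2); do                        # up to three attempts
          kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
          exit_status=$?
          [[ $exit_status != 0 ]] || break           # stop retrying on success
          sleep 1                                    # assumed pause between retries
      done
      cat "$LAST_OUT"                                # replay captured stdout
      cat "$LAST_ERR" >&2                            # replay captured stderr
      rm "$LAST_OUT" "$LAST_ERR"
      return $exit_status
  }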
+ cat /tmp/tmp.SvF629IbHo + rm /tmp/tmp.kaym3OcHEy /tmp/tmp.SvF629IbHo + return 0 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-proxysql 3 + local cluster=upgrade-proxysql + local cluster_size=3 + wait_cluster_consistency upgrade-proxysql 3 + local cluster_name=upgrade-proxysql + local cluster_size=3 + local proxy_size= + '[' -z '' ']' + proxy_size=3 + sleep 7 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.27Qb1oDvlt +++ mktemp ++ local LAST_ERR=/tmp/tmp.QAsj3aVnb7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.27Qb1oDvlt ++ cat /tmp/tmp.QAsj3aVnb7 ++ rm /tmp/tmp.27Qb1oDvlt /tmp/tmp.QAsj3aVnb7 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vuDc9vsuga +++ mktemp ++ local LAST_ERR=/tmp/tmp.xHMZcQWi3M ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.vuDc9vsuga ++ cat /tmp/tmp.xHMZcQWi3M ++ rm /tmp/tmp.vuDc9vsuga /tmp/tmp.xHMZcQWi3M ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-proxysql +++ local cluster_name=upgrade-proxysql ++++ get_proxy upgrade-proxysql ++++ local target_cluster=upgrade-proxysql +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.askw0uJriM ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.C8II3q9U6q +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.askw0uJriM +++++ cat /tmp/tmp.C8II3q9U6q +++++ rm /tmp/tmp.askw0uJriM /tmp/tmp.C8II3q9U6q +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.0D1Mkts7YB ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.cmUvhkiEId +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.0D1Mkts7YB +++++ cat /tmp/tmp.cmUvhkiEId +++++ rm /tmp/tmp.0D1Mkts7YB /tmp/tmp.cmUvhkiEId +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-proxysql-proxysql ++++ return +++ local cluster_proxy=upgrade-proxysql-proxysql +++ echo proxysql ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dHEjk0fzf8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ObCkG9xQgP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.dHEjk0fzf8 ++ cat /tmp/tmp.ObCkG9xQgP ++ rm /tmp/tmp.dHEjk0fzf8 /tmp/tmp.ObCkG9xQgP ++ return 0 + [[ 3 == \3 ]] + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for 
i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo upgrade-proxysql-pxc-0 + local container=pxc + set +o xtrace upgrade-proxysql-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-2.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rdg19xFpKB +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZUmNCgPfnD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.Rdg19xFpKB ++ cat /tmp/tmp.ZUmNCgPfnD ++ rm /tmp/tmp.Rdg19xFpKB /tmp/tmp.ZUmNCgPfnD ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OgGCALPrAJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Al8JPQxCXG ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.OgGCALPrAJ ++ cat /tmp/tmp.Al8JPQxCXG ++ rm /tmp/tmp.OgGCALPrAJ /tmp/tmp.Al8JPQxCXG ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V1VvQYLniU +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bj2Pez3qkz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.V1VvQYLniU ++ cat /tmp/tmp.Bj2Pez3qkz ++ rm /tmp/tmp.V1VvQYLniU /tmp/tmp.Bj2Pez3qkz ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yNpjmCnt1l +++ mktemp ++ local LAST_ERR=/tmp/tmp.JhNsnGf4DM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.yNpjmCnt1l ++ cat /tmp/tmp.JhNsnGf4DM ++ rm /tmp/tmp.yNpjmCnt1l /tmp/tmp.JhNsnGf4DM ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a == perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZT1ZXidfQ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.L1CO3QPyYI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ZT1ZXidfQ1 ++ cat /tmp/tmp.L1CO3QPyYI ++ rm /tmp/tmp.ZT1ZXidfQ1 /tmp/tmp.L1CO3QPyYI ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25 == percona/percona-xtradb-cluster-operator:1.10.0-proxysql-8.0.25 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iNvH9k28qD +++ mktemp ++ local LAST_ERR=/tmp/tmp.hbgujZlp89 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.iNvH9k28qD ++ cat /tmp/tmp.hbgujZlp89 ++ rm /tmp/tmp.iNvH9k28qD /tmp/tmp.hbgujZlp89 ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25 == percona/percona-xtradb-cluster-operator:1.10.0-haproxy-8.0.25 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wYbBssr6Yw +++ mktemp ++ local LAST_ERR=/tmp/tmp.aZlXRmmxez ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.wYbBssr6Yw ++ cat /tmp/tmp.aZlXRmmxez ++ rm /tmp/tmp.wYbBssr6Yw /tmp/tmp.aZlXRmmxez ++ return 0 + [[ percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup == percona/percona-xtradb-cluster-operator:1.10.0-pxc8.0.25-backup ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hcQeHIwINY +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kfn8jJUp0x ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.hcQeHIwINY ++ cat /tmp/tmp.Kfn8jJUp0x ++ rm /tmp/tmp.hcQeHIwINY /tmp/tmp.Kfn8jJUp0x ++ return 0 + [[ percona/pmm-client:2.23.0 == percona/pmm-client:2.23.0 ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BnPDvhvtbL +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ue5CZ3KXXN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.BnPDvhvtbL ++ cat /tmp/tmp.Ue5CZ3KXXN ++ rm /tmp/tmp.BnPDvhvtbL /tmp/tmp.Ue5CZ3KXXN ++ return 0 + [[ percona/percona-xtradb-cluster:8.0.25-15.1 == percona/percona-xtradb-cluster:8.0.25-15.1 ]] + : Operator image has been updated correctly + compare_generation 1 proxysql upgrade-proxysql + local generation=1 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jCFvnuLvOV +++ mktemp ++ local LAST_ERR=/tmp/tmp.o32cGXMLhq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.jCFvnuLvOV ++ cat /tmp/tmp.o32cGXMLhq ++ rm /tmp/tmp.jCFvnuLvOV /tmp/tmp.o32cGXMLhq ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1TGqI5KyXm +++ mktemp ++ local LAST_ERR=/tmp/tmp.AwoZqikZUP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.1TGqI5KyXm ++ cat /tmp/tmp.AwoZqikZUP ++ rm /tmp/tmp.1TGqI5KyXm /tmp/tmp.AwoZqikZUP ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch pxc images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch pxc images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch pxc upgrade-proxysql --type=merge --patch '{ "spec": { "crVersion": "1.11.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.skiDofnidJ ++ mktemp + local LAST_ERR=/tmp/tmp.345RGUF0H7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl patch pxc upgrade-proxysql --type=merge --patch '{ "spec": { "crVersion": "1.11.0", "pxc": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0" }, "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "haproxy": { "image": "perconalab/percona-xtradb-cluster-operator:main-haproxy" }, "proxysql": { "image": "perconalab/percona-xtradb-cluster-operator:main-proxysql" }, "backup": { "image": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup" } }}' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.skiDofnidJ perconaxtradbcluster.pxc.percona.com/upgrade-proxysql patched + cat /tmp/tmp.345RGUF0H7 + rm /tmp/tmp.skiDofnidJ /tmp/tmp.345RGUF0H7 + return 0 + sleep 10 + desc 'check images and generation after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after full upgrade 
----------------------------------------------------------------------------------- + check_pxc_liveness upgrade-proxysql 3 + local cluster=upgrade-proxysql + local cluster_size=3 + wait_cluster_consistency upgrade-proxysql 3 + local cluster_name=upgrade-proxysql + local cluster_size=3 + local proxy_size= + '[' -z '' ']' + proxy_size=3 + sleep 7 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XE4dTOpUaD +++ mktemp ++ local LAST_ERR=/tmp/tmp.5JCrKEwiJs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.XE4dTOpUaD ++ cat /tmp/tmp.5JCrKEwiJs ++ rm /tmp/tmp.XE4dTOpUaD /tmp/tmp.5JCrKEwiJs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NfQYDolO0X +++ mktemp ++ local LAST_ERR=/tmp/tmp.uoYn4f6qTf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.NfQYDolO0X ++ cat /tmp/tmp.uoYn4f6qTf ++ rm /tmp/tmp.NfQYDolO0X /tmp/tmp.uoYn4f6qTf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mivWIm8fic +++ mktemp ++ local LAST_ERR=/tmp/tmp.qNMitzXwWV ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.mivWIm8fic ++ cat /tmp/tmp.qNMitzXwWV ++ rm /tmp/tmp.mivWIm8fic /tmp/tmp.qNMitzXwWV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XnLoLnpaXy +++ mktemp ++ local LAST_ERR=/tmp/tmp.fjJuRnl8xL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.XnLoLnpaXy ++ cat /tmp/tmp.fjJuRnl8xL ++ rm /tmp/tmp.XnLoLnpaXy /tmp/tmp.fjJuRnl8xL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fJgrzENBGH +++ mktemp ++ local LAST_ERR=/tmp/tmp.7vyNXD4vc7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.fJgrzENBGH ++ cat /tmp/tmp.7vyNXD4vc7 ++ rm /tmp/tmp.fJgrzENBGH /tmp/tmp.7vyNXD4vc7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ULfh6irF3x +++ mktemp ++ local LAST_ERR=/tmp/tmp.dl7H6HFZQe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ULfh6irF3x ++ cat /tmp/tmp.dl7H6HFZQe 
++ rm /tmp/tmp.ULfh6irF3x /tmp/tmp.dl7H6HFZQe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.skv5ytlfPY +++ mktemp ++ local LAST_ERR=/tmp/tmp.mo37X2HtGk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.skv5ytlfPY ++ cat /tmp/tmp.mo37X2HtGk ++ rm /tmp/tmp.skv5ytlfPY /tmp/tmp.mo37X2HtGk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ql2gxWhbku +++ mktemp ++ local LAST_ERR=/tmp/tmp.kGOqpkpztM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.Ql2gxWhbku ++ cat /tmp/tmp.kGOqpkpztM ++ rm /tmp/tmp.Ql2gxWhbku /tmp/tmp.kGOqpkpztM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cuyyWUAtkF +++ mktemp ++ local LAST_ERR=/tmp/tmp.pc3mU0YmUo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.cuyyWUAtkF ++ cat /tmp/tmp.pc3mU0YmUo ++ rm /tmp/tmp.cuyyWUAtkF /tmp/tmp.pc3mU0YmUo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e6KkabmCTP +++ mktemp ++ local LAST_ERR=/tmp/tmp.UyOw7KGJzq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.e6KkabmCTP ++ cat /tmp/tmp.UyOw7KGJzq ++ rm /tmp/tmp.e6KkabmCTP /tmp/tmp.UyOw7KGJzq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ea5Ho04ZtU +++ mktemp ++ local LAST_ERR=/tmp/tmp.JUE5bdW7kZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.ea5Ho04ZtU ++ cat /tmp/tmp.JUE5bdW7kZ ++ rm /tmp/tmp.ea5Ho04ZtU /tmp/tmp.JUE5bdW7kZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jSWEuiuzu7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iAWNuNCcfZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.jSWEuiuzu7 ++ cat /tmp/tmp.iAWNuNCcfZ ++ rm /tmp/tmp.jSWEuiuzu7 /tmp/tmp.iAWNuNCcfZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ 
kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MhQ0DwnLBN +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Y5PLRYZqr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.MhQ0DwnLBN ++ cat /tmp/tmp.2Y5PLRYZqr ++ rm /tmp/tmp.MhQ0DwnLBN /tmp/tmp.2Y5PLRYZqr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G502JbK08o +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z5wMwLcqlU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.G502JbK08o ++ cat /tmp/tmp.Z5wMwLcqlU ++ rm /tmp/tmp.G502JbK08o /tmp/tmp.Z5wMwLcqlU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GgcEGCadkp +++ mktemp ++ local LAST_ERR=/tmp/tmp.NWYIG1WGwS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.GgcEGCadkp ++ cat /tmp/tmp.NWYIG1WGwS ++ rm /tmp/tmp.GgcEGCadkp /tmp/tmp.NWYIG1WGwS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U0LXVQOrXr +++ mktemp ++ local LAST_ERR=/tmp/tmp.wLt8AtJIyu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.U0LXVQOrXr ++ cat /tmp/tmp.wLt8AtJIyu ++ rm /tmp/tmp.U0LXVQOrXr /tmp/tmp.wLt8AtJIyu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n0feqGNtmD +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZzLzTw2UBe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.n0feqGNtmD ++ cat /tmp/tmp.ZzLzTw2UBe ++ rm /tmp/tmp.n0feqGNtmD /tmp/tmp.ZzLzTw2UBe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CzNbSuOH89 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y4CzfYfzKk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.CzNbSuOH89 ++ cat /tmp/tmp.Y4CzfYfzKk ++ rm /tmp/tmp.CzNbSuOH89 /tmp/tmp.Y4CzfYfzKk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qTprv9Jaur +++ mktemp ++ local LAST_ERR=/tmp/tmp.R45nWuw04S ++ local 
exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.qTprv9Jaur ++ cat /tmp/tmp.R45nWuw04S ++ rm /tmp/tmp.qTprv9Jaur /tmp/tmp.R45nWuw04S ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PVwdkc19H4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fLY2VroryJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.PVwdkc19H4 ++ cat /tmp/tmp.fLY2VroryJ ++ rm /tmp/tmp.PVwdkc19H4 /tmp/tmp.fLY2VroryJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dGKOJghnya +++ mktemp ++ local LAST_ERR=/tmp/tmp.BEpJ1ZFBhc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.dGKOJghnya ++ cat /tmp/tmp.BEpJ1ZFBhc ++ rm /tmp/tmp.dGKOJghnya /tmp/tmp.BEpJ1ZFBhc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QZQ9TmXOxC +++ mktemp ++ local LAST_ERR=/tmp/tmp.EkB6elOEj1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.QZQ9TmXOxC ++ cat /tmp/tmp.EkB6elOEj1 ++ rm /tmp/tmp.QZQ9TmXOxC /tmp/tmp.EkB6elOEj1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tIwM5jYr4o +++ mktemp ++ local LAST_ERR=/tmp/tmp.CW9kmOqOjJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.tIwM5jYr4o ++ cat /tmp/tmp.CW9kmOqOjJ ++ rm /tmp/tmp.tIwM5jYr4o /tmp/tmp.CW9kmOqOjJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mAvlBqRVMg +++ mktemp ++ local LAST_ERR=/tmp/tmp.hjN6s0lzDY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.mAvlBqRVMg ++ cat /tmp/tmp.hjN6s0lzDY ++ rm /tmp/tmp.mAvlBqRVMg /tmp/tmp.hjN6s0lzDY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8SowSH2jn1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iqP3JfEyvJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat 
/tmp/tmp.8SowSH2jn1 ++ cat /tmp/tmp.iqP3JfEyvJ ++ rm /tmp/tmp.8SowSH2jn1 /tmp/tmp.iqP3JfEyvJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qMZipMFWYx +++ mktemp ++ local LAST_ERR=/tmp/tmp.GAEQDUCSuY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.qMZipMFWYx ++ cat /tmp/tmp.GAEQDUCSuY ++ rm /tmp/tmp.qMZipMFWYx /tmp/tmp.GAEQDUCSuY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wNSwq2torJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.a54GDKYgqh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.state}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.wNSwq2torJ ++ cat /tmp/tmp.a54GDKYgqh ++ rm /tmp/tmp.wNSwq2torJ /tmp/tmp.a54GDKYgqh ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sTh4gbMQja +++ mktemp ++ local LAST_ERR=/tmp/tmp.WUyVuPRBrZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.sTh4gbMQja ++ cat /tmp/tmp.WUyVuPRBrZ ++ rm /tmp/tmp.sTh4gbMQja /tmp/tmp.WUyVuPRBrZ ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine upgrade-proxysql +++ local cluster_name=upgrade-proxysql ++++ get_proxy upgrade-proxysql ++++ local target_cluster=upgrade-proxysql +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6pXr4ate0A ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.xXSIlqN8FV +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.6pXr4ate0A +++++ cat /tmp/tmp.xXSIlqN8FV +++++ rm /tmp/tmp.6pXr4ate0A /tmp/tmp.xXSIlqN8FV +++++ return 0 ++++ [[ false == \t\r\u\e ]] +++++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.kWuvjnaiTm ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.GPv6C1hhN8 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.enabled}' +++++ exit_status=0 +++++ [[ 0 != 0 ]] +++++ break +++++ cat /tmp/tmp.kWuvjnaiTm +++++ cat /tmp/tmp.GPv6C1hhN8 +++++ rm /tmp/tmp.kWuvjnaiTm /tmp/tmp.GPv6C1hhN8 +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo upgrade-proxysql-proxysql ++++ return +++ local cluster_proxy=upgrade-proxysql-proxysql +++ echo proxysql ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GW2hLSPgA3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kehFGUJwZ1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.status.proxysql.ready}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.GW2hLSPgA3 ++ cat /tmp/tmp.kehFGUJwZ1 ++ rm /tmp/tmp.GW2hLSPgA3 /tmp/tmp.kehFGUJwZ1 ++ return 0 + [[ 
3 == \3 ]] + wait_for_running upgrade-proxysql-pxc 3 + local name=upgrade-proxysql-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-0 480 + local pod=upgrade-proxysql-pxc-0 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-1 480 + local pod=upgrade-proxysql-pxc-1 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod upgrade-proxysql-pxc-2 480 + local pod=upgrade-proxysql-pxc-2 + local max_retry=480 + local ns= ++ echo upgrade-proxysql-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace upgrade-proxysql-pxc-2.Ok ++ seq 0 2 + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-0.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YHqYSYwc1C +++ mktemp ++ local LAST_ERR=/tmp/tmp.tOU6vpPexs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.YHqYSYwc1C ++ cat /tmp/tmp.tOU6vpPexs ++ rm /tmp/tmp.YHqYSYwc1C /tmp/tmp.tOU6vpPexs ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-1.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k3vBdjQ5ZI +++ mktemp ++ local LAST_ERR=/tmp/tmp.kI8JIJ3cAC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.k3vBdjQ5ZI ++ cat /tmp/tmp.kI8JIJ3cAC ++ rm /tmp/tmp.k3vBdjQ5ZI /tmp/tmp.kI8JIJ3cAC ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ echo pxc-client-6c8cd976cc-cs7jc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql + for i in '$(seq 0 $((cluster_size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h upgrade-proxysql-pxc-2.upgrade-proxysql-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H3IPdv5xxW +++ mktemp ++ local LAST_ERR=/tmp/tmp.k5QeadYRHi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.H3IPdv5xxW ++ cat /tmp/tmp.k5QeadYRHi ++ rm /tmp/tmp.H3IPdv5xxW /tmp/tmp.k5QeadYRHi ++ return 0 + client_pod=pxc-client-6c8cd976cc-cs7jc + wait_pod pxc-client-6c8cd976cc-cs7jc + local pod=pxc-client-6c8cd976cc-cs7jc + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-6c8cd976cc-cs7jc + local container= + set +o xtrace pxc-client-6c8cd976cc-cs7jc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.4DJIKxAFYN/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/select-1.sql /tmp/tmp.4DJIKxAFYN/select-1.sql ++ kubectl_bin get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pAncM5Xejf +++ mktemp ++ local LAST_ERR=/tmp/tmp.BRX7SWMqg0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pod -n pxc-operator --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-xtradb-cluster-operator")].image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.pAncM5Xejf ++ cat /tmp/tmp.BRX7SWMqg0 ++ rm /tmp/tmp.pAncM5Xejf /tmp/tmp.BRX7SWMqg0 ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a == perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gSatou6wGc +++ mktemp ++ local LAST_ERR=/tmp/tmp.W66hOYXh9o ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.proxysql.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.gSatou6wGc ++ cat /tmp/tmp.W66hOYXh9o ++ rm /tmp/tmp.gSatou6wGc /tmp/tmp.W66hOYXh9o ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-proxysql == perconalab/percona-xtradb-cluster-operator:main-proxysql ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kjFjjvBfvg +++ mktemp ++ local LAST_ERR=/tmp/tmp.TQGuoMeopA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.haproxy.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.kjFjjvBfvg ++ cat /tmp/tmp.TQGuoMeopA ++ rm /tmp/tmp.kjFjjvBfvg /tmp/tmp.TQGuoMeopA ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-haproxy == perconalab/percona-xtradb-cluster-operator:main-haproxy ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TrAlgmbShV +++ mktemp ++ local LAST_ERR=/tmp/tmp.zTlgIuKOkb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.TrAlgmbShV ++ cat /tmp/tmp.zTlgIuKOkb ++ rm /tmp/tmp.TrAlgmbShV /tmp/tmp.zTlgIuKOkb ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup == perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8Ky6wRx5jA +++ mktemp ++ local LAST_ERR=/tmp/tmp.q32CkxobM3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pxc upgrade-proxysql -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.8Ky6wRx5jA ++ cat /tmp/tmp.q32CkxobM3 ++ rm /tmp/tmp.8Ky6wRx5jA /tmp/tmp.q32CkxobM3 ++ return 0 + [[ perconalab/pmm-client:dev-latest == perconalab/pmm-client:dev-latest ]] ++ kubectl_bin get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7wOsaKnDBR +++ mktemp ++ local LAST_ERR=/tmp/tmp.ckPUpLkzBA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl 
get pxc upgrade-proxysql -o 'jsonpath={.spec.pxc.image}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.7wOsaKnDBR ++ cat /tmp/tmp.ckPUpLkzBA ++ rm /tmp/tmp.7wOsaKnDBR /tmp/tmp.ckPUpLkzBA ++ return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 == perconalab/percona-xtradb-cluster-operator:main-pxc8.0 ]] + : Cluster images have been updated correctly + compare_generation 2 proxysql upgrade-proxysql + local generation=2 + local proxy=proxysql + local cluster=upgrade-proxysql + local current_generation + [[ proxysql == \h\a\p\r\o\x\y ]] + containers=(pxc proxysql) + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IQxkKmpvm5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LzUSoqDFRF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get statefulset upgrade-proxysql-pxc -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.IQxkKmpvm5 ++ cat /tmp/tmp.LzUSoqDFRF ++ rm /tmp/tmp.IQxkKmpvm5 /tmp/tmp.LzUSoqDFRF ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + for container in '"${containers[@]}"' ++ kubectl_bin get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0u8rOsTOYg +++ mktemp ++ local LAST_ERR=/tmp/tmp.CiFP5OWaPN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get statefulset upgrade-proxysql-proxysql -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.0u8rOsTOYg ++ cat /tmp/tmp.CiFP5OWaPN ++ rm /tmp/tmp.0u8rOsTOYg /tmp/tmp.CiFP5OWaPN ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_kubectl statefulset/upgrade-proxysql-pxc + local resource=statefulset/upgrade-proxysql-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc.yml + local new_result=/tmp/tmp.4DJIKxAFYN/statefulset_upgrade-proxysql-pxc.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-eks.yml ']' + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/upgrade-proxysql-pxc + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - '**.creationTimestamp' + yq d - '**.image' + yq d - spec.volumeMode + yq d - '**.procMount' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.storageClassName' + yq d - '**.dataSource' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**.finalizers' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.clusterIP' + yq d - '**."percona.com/*"' + yq d - '**.clusterIPs' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.volumeName' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.(name==suffix)' + yq d - '**.healthCheckNodePort' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.enableServiceLinks' + yq d - '**.nodePort' + yq d - status + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.imagePullSecrets' + yq d - '**.(name==NAMESPACE)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + yq d - '**.creationTimestamp' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + /usr/bin/sed s/upgrade-proxysql-20589/namespace/g + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.DabZv3crfK ++ mktemp + local LAST_ERR=/tmp/tmp.T4XjgDdeIi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/upgrade-proxysql-pxc + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.DabZv3crfK + cat /tmp/tmp.T4XjgDdeIi + rm /tmp/tmp.DabZv3crfK /tmp/tmp.T4XjgDdeIi + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-pxc.yml /tmp/tmp.4DJIKxAFYN/statefulset_upgrade-proxysql-pxc.yml + compare_kubectl statefulset/upgrade-proxysql-proxysql + local resource=statefulset/upgrade-proxysql-proxysql + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql.yml + local new_result=/tmp/tmp.4DJIKxAFYN/statefulset_upgrade-proxysql-proxysql.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-eks.yml ']' + [[ percona/percona-xtradb-cluster:8.0.25-15.1 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/upgrade-proxysql-proxysql + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.deletionTimestamp + yq d - metadata.selfLink + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.image' + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - '**.volumeName' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - spec.nodeName + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - spec.volumeMode + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.procMount' + yq d - '**.healthCheckNodePort' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.nodePort' + yq d - '**.dataSource' + yq d - '**.imagePullSecrets' + yq d - '**.finalizers' + yq d - '**.enableServiceLinks' + yq d - '**."percona.com/*"' + yq d - '**.storageClassName' + yq d - status + yq d - '**.(name==suffix)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.(name==NAMESPACE)' + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.controller-uid' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - spec.ipFamilyPolicy + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.creationTimestamp' + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed s/upgrade-proxysql-20589/namespace/g + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - metadata.managedFields ++ mktemp + local LAST_OUT=/tmp/tmp.O9gtN4WiZh + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' ++ mktemp + local LAST_ERR=/tmp/tmp.bzpN9LgKRW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/upgrade-proxysql-proxysql + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.O9gtN4WiZh + cat /tmp/tmp.bzpN9LgKRW + rm /tmp/tmp.O9gtN4WiZh /tmp/tmp.bzpN9LgKRW + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/upgrade-proxysql/compare/statefulset_upgrade-proxysql-proxysql.yml /tmp/tmp.4DJIKxAFYN/statefulset_upgrade-proxysql-proxysql.yml + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + destroy upgrade-proxysql-20589 + local namespace=upgrade-proxysql-20589 + local ignore_logs=false + [[ false == \f\a\l\s\e ]] + grep -v 'get backup status: Job.batch' + grep -v 'the object has been modified' ++ get_operator_pod ++ local 
label_prefix=app.kubernetes.io/ + grep -v level=info + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.4DJIKxAFYN/operator.log +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.e7eop1HWon +++ mktemp ++ local LAST_ERR=/tmp/tmp.rXZOk7os1c ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.e7eop1HWon ++ cat /tmp/tmp.rXZOk7os1c ++ rm /tmp/tmp.e7eop1HWon /tmp/tmp.rXZOk7os1c ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-67c489499d-hc2ll ++ mktemp + local LAST_OUT=/tmp/tmp.bKsyp7eBDQ ++ mktemp + local LAST_ERR=/tmp/tmp.4CL6CXoEfl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-67c489499d-hc2ll + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.bKsyp7eBDQ + cat /tmp/tmp.4CL6CXoEfl + rm /tmp/tmp.bKsyp7eBDQ /tmp/tmp.4CL6CXoEfl + return 0 I0412 10:01:24.819708 1 request.go:645] Throttling request took 1.031278626s, request: GET:https://10.67.240.1:443/apis/snapshot.storage.k8s.io/v1beta1?timeout=32s {"level":"info",,"caller":"pxc/backup.go:87","msg":"Creating or updating backup job","name":"9f165-daily-backup","schedule":"0 0 * * *"} {"level":"info",,"caller":"pxc/backup.go:87","msg":"Creating or updating backup job","name":"9f165-sat-night-backup","schedule":"0 0 * * 6"} {"level":"info",,"caller":"pxc/upgrade.go:266","msg":"statefulSet was changed, run smart update"} {"level":"info",,"caller":"pxc/upgrade.go:294","msg":"primary pod","pod name":"upgrade-proxysql-pxc-0.upgrade-proxysql-pxc.upgrade-proxysql-20589.svc.cluster.local"} {"level":"info",,"caller":"pxc/upgrade.go:311","msg":"apply changes to secondary pod","pod name":"upgrade-proxysql-pxc-1"} {"level":"info",,"caller":"pxc/upgrade.go:311","msg":"apply changes to secondary pod","pod name":"upgrade-proxysql-pxc-2"} {"level":"info",,"caller":"pxc/upgrade.go:318","msg":"apply changes to primary pod","pod name":"upgrade-proxysql-pxc-0"} {"level":"info",,"caller":"pxc/upgrade.go:323","msg":"smart update finished"} {"level":"info",,"caller":"pxc/upgrade.go:395","msg":"pod present in hostgroups","pod name":"upgrade-proxysql-pxc-0"} {"level":"info",,"caller":"pxc/upgrade.go:395","msg":"pod present in hostgroups","pod name":"upgrade-proxysql-pxc-1"} {"level":"info",,"caller":"pxc/upgrade.go:395","msg":"pod present in hostgroups","pod name":"upgrade-proxysql-pxc-2"} {"level":"info",,"caller":"pxc/upgrade.go:427","msg":"pod is online","pod name":"upgrade-proxysql-pxc-0"} {"level":"info",,"caller":"pxc/upgrade.go:427","msg":"pod is online","pod name":"upgrade-proxysql-pxc-1"} {"level":"info",,"caller":"pxc/upgrade.go:427","msg":"pod is online","pod name":"upgrade-proxysql-pxc-2"} {"level":"info",,"caller":"pxc/upgrade.go:588","msg":"pod is running","pod name":"upgrade-proxysql-pxc-0"} {"level":"info",,"caller":"pxc/upgrade.go:588","msg":"pod is running","pod name":"upgrade-proxysql-pxc-1"} {"level":"info",,"caller":"pxc/upgrade.go:588","msg":"pod is running","pod name":"upgrade-proxysql-pxc-2"} 
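The operator log entries just above capture the smart-update flow triggered by the CR patch: the changed statefulset is detected, the primary (upgrade-proxysql-pxc-0) is identified, the secondary pods are updated first and the primary last, after which each pod is confirmed back in the ProxySQL hostgroups and online. To pull only that sequence out of a live run, a convenience one-liner along these lines (not part of the test itself) should work:

  kubectl -n pxc-operator logs deploy/percona-xtradb-cluster-operator \
    | grep -E 'smart update|primary pod|apply changes to (secondary|primary) pod'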
{"level":"info",,"caller":"pxc/version.go:328","msg":"update PXC version (fetched from db)","new version":"8.0.27-18.1"} {"level":"info",,"logger":"cmd","msg":"Git commit: 706f792ae47c369cb3556faff186b6873a8a247f Git branch: PR-1125-706f792a Build time: 2022-04-12T09:09:41Z"} {"level":"info",,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"} {"level":"info",,"logger":"cmd","msg":"Go Version: go1.17.8"} {"level":"info",,"logger":"cmd","msg":"operator-sdk Version: v0.19.4"} {"level":"info",,"logger":"cmd","msg":"Registering Components."} {"level":"info",,"logger":"cmd","msg":"Runs on","platform":"kubernetes","version":"v1.20.15-gke.4100"} {"level":"info",,"logger":"cmd","msg":"Starting the Cmd."} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} {"level":"info",,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"registering webhook","path":"/validate-percona-xtradbcluster"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"serving webhook server","host":"","port":9443} {"level":"info",,"logger":"controller-runtime.webhook.webhooks","msg":"starting webhook server"} {"level":"info",,"logger":"leader","msg":"Became the leader."} {"level":"info",,"logger":"leader","msg":"Found existing lock","LockOwner":"percona-xtradb-cluster-operator-6855f87d75-j4hn9"} {"level":"info",,"logger":"leader","msg":"Leader pod has been deleted, waiting for garbage collection to remove the lock."} {"level":"info",,"logger":"leader","msg":"Not the leader. 
Waiting."} {"level":"info",,"logger":"leader","msg":"Trying to become the leader."} [mysql] 2022/04/12 10:11:24 packets.go:36: unexpected EOF + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n upgrade-proxysql-20589 upgrade-proxysql --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/upgrade-proxysql patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.UMP64dUf9Q ++ mktemp + local LAST_ERR=/tmp/tmp.T0pcxFYhJZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.UMP64dUf9Q perconaxtradbcluster.pxc.percona.com "upgrade-proxysql" deleted + cat /tmp/tmp.T0pcxFYhJZ + rm /tmp/tmp.UMP64dUf9Q /tmp/tmp.T0pcxFYhJZ + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.tO1dRifwgm ++ mktemp + local LAST_ERR=/tmp/tmp.U2wHitNe0O + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.tO1dRifwgm No resources found + cat /tmp/tmp.U2wHitNe0O + rm /tmp/tmp.tO1dRifwgm /tmp/tmp.U2wHitNe0O + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.y70FeCZBru ++ mktemp + local LAST_ERR=/tmp/tmp.LPC4AIA1dR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.y70FeCZBru No resources found + cat /tmp/tmp.LPC4AIA1dR + rm /tmp/tmp.y70FeCZBru /tmp/tmp.LPC4AIA1dR + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.9NFZVCbEzN ++ mktemp + local LAST_ERR=/tmp/tmp.6IqzGtelnF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.9NFZVCbEzN validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.6IqzGtelnF + rm /tmp/tmp.9NFZVCbEzN /tmp/tmp.6IqzGtelnF + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + rm -rf /tmp/tmp.4DJIKxAFYN + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator + kubectl_bin delete --grace-period=0 --force=true namespace upgrade-proxysql-20589 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.SQ4qeIt3O3 + local LAST_OUT=/tmp/tmp.NlztA6G61V ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.oFetZst6en + local exit_status=0 + local LAST_ERR=/tmp/tmp.4VrSV8rHdO + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace upgrade-proxysql-20589 + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace pxc-operator