Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/logs/upgrade-sharded.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + cluster=upgrade-sharded + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.17.0 + TARGET_IMAGE=perconalab/percona-server-mongodb-operator:PR-1559-6384b519 + TARGET_IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod7.0 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_BACKUP=perconalab/percona-server-mongodb-operator:main-backup ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ local 'cli=mongod --version' +++ local pod_name=26351 +++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ kubectl_bin -n default run 26351 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7yNxLz4cRl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CiB2apj2nb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default run 26351 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7yNxLz4cRl +++ cat /tmp/tmp.CiB2apj2nb +++ rm /tmp/tmp.7yNxLz4cRl /tmp/tmp.CiB2apj2nb +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/26351 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.paZuLuJT7A ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mEjDbUkm8z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/26351 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.paZuLuJT7A +++ cat /tmp/tmp.mEjDbUkm8z +++ rm /tmp/tmp.paZuLuJT7A /tmp/tmp.mEjDbUkm8z +++ return 0 ++++ kubectl_bin -n default exec 26351 -- mongod --version +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nzTaRwhtut +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Jkus5FUmms ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default exec 26351 -- mongod --version ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.nzTaRwhtut ++++ cat /tmp/tmp.Jkus5FUmms ++++ rm /tmp/tmp.nzTaRwhtut /tmp/tmp.Jkus5FUmms ++++ return 0 +++ local 'output=db version v7.0.11-6 Build Info: { "version": "7.0.11-6", "gitVersion": "0818dfdcc4349542e73dd63f56ab35f92498a115", "openSSLVersion": "OpenSSL 1.1.1k FIPS 25 Mar 2021", "modules": [], "proFeatures": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/26351 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.43ufSAoxNx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vssDkYeOvF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ 
kubectl -n default delete pod/26351 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.43ufSAoxNx +++ cat /tmp/tmp.vssDkYeOvF Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.43ufSAoxNx /tmp/tmp.vssDkYeOvF +++ return 0 +++ echo db version v7.0.11-6 Build Info: '{' '"version":' '"7.0.11-6",' '"gitVersion":' '"0818dfdcc4349542e73dd63f56ab35f92498a115",' '"openSSLVersion":' '"OpenSSL' 1.1.1k FIPS 25 Mar '2021",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=7.0.11-6 ++ [[ ! 7.0.11-6 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 7.0.11-6 + FULL_VER=7.0.11-6 + MONGO_VER=7.0 ++ curl -s https://check.percona.com/versions/v1/psmdb-operator ++ jq -r '.versions[].operator' ++ tail -n1 ++ sort -V + INIT_OPERATOR_VER=1.16.0 + [[ 1.16.0 == \1\.\1\7\.\0 ]] + GIT_TAG=v1.16.0 + case $(curl -s -o /dev/null -w "%{http_code}" 'https://check.percona.com/versions/v1/psmdb-operator/'${INIT_OPERATOR_VER}/'latest?databaseVersion='${MONGO_VER}'') in ++ curl -s -o /dev/null -w '%{http_code}' 'https://check.percona.com/versions/v1/psmdb-operator/1.16.0/latest?databaseVersion=7.0' ++ curl -s 'https://check.percona.com/versions/v1/psmdb-operator/1.16.0/latest?databaseVersion=7.0' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"psmdb-operator", "operator":"1.16.0", "matrix":{"mongod":{"7.0.8-5":{"imagePath":"percona/percona-server-mongodb:7.0.8-5", "imageHash":"f81d1353d5497c5be36ee525f742d498ee6e1df9aba9502660c50f0fc98743b6", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pxc":{}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{}, "haproxy":{}, "backup":{"2.4.1":{"imagePath":"percona/percona-backup-mongodb:2.4.1", "imageHash":"a45d277af98090781a6149ccfb99d5bc4431ec53ba3b36ea644332851412a17e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.16.0":{"imagePath":"percona/percona-server-mongodb-operator:1.16.0", "imageHash":"30c92be499563a97e12e0542d3962c9cb10081f55722305a916eb98a229109c3", "imageHashArm64":"3fa8fffcf13f67526b0305ff5d048cdd2b5fd5fcb525ddbdf092b9548d93b302", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + OPERATOR_NAME=percona-server-mongodb-operator ++ echo '{"versions":[{"product":"psmdb-operator", "operator":"1.16.0", "matrix":{"mongod":{"7.0.8-5":{"imagePath":"percona/percona-server-mongodb:7.0.8-5", "imageHash":"f81d1353d5497c5be36ee525f742d498ee6e1df9aba9502660c50f0fc98743b6", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pxc":{}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{}, "haproxy":{}, 
"backup":{"2.4.1":{"imagePath":"percona/percona-backup-mongodb:2.4.1", "imageHash":"a45d277af98090781a6149ccfb99d5bc4431ec53ba3b36ea644332851412a17e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.16.0":{"imagePath":"percona/percona-server-mongodb-operator:1.16.0", "imageHash":"30c92be499563a97e12e0542d3962c9cb10081f55722305a916eb98a229109c3", "imageHashArm64":"3fa8fffcf13f67526b0305ff5d048cdd2b5fd5fcb525ddbdf092b9548d93b302", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-server-mongodb-operator:1.16.0 ++ echo perconalab/percona-server-mongodb-operator:PR-1559-6384b519 ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE='perconalab\/percona-server-mongodb-operator:1.16.0' ++ jq -r '.versions[].matrix.mongod[].imagePath' ++ echo '{"versions":[{"product":"psmdb-operator", "operator":"1.16.0", "matrix":{"mongod":{"7.0.8-5":{"imagePath":"percona/percona-server-mongodb:7.0.8-5", "imageHash":"f81d1353d5497c5be36ee525f742d498ee6e1df9aba9502660c50f0fc98743b6", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pxc":{}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{}, "haproxy":{}, "backup":{"2.4.1":{"imagePath":"percona/percona-backup-mongodb:2.4.1", "imageHash":"a45d277af98090781a6149ccfb99d5bc4431ec53ba3b36ea644332851412a17e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.16.0":{"imagePath":"percona/percona-server-mongodb-operator:1.16.0", "imageHash":"30c92be499563a97e12e0542d3962c9cb10081f55722305a916eb98a229109c3", "imageHashArm64":"3fa8fffcf13f67526b0305ff5d048cdd2b5fd5fcb525ddbdf092b9548d93b302", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_MONGOD=percona/percona-server-mongodb:7.0.8-5 ++ echo '{"versions":[{"product":"psmdb-operator", "operator":"1.16.0", "matrix":{"mongod":{"7.0.8-5":{"imagePath":"percona/percona-server-mongodb:7.0.8-5", "imageHash":"f81d1353d5497c5be36ee525f742d498ee6e1df9aba9502660c50f0fc98743b6", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pxc":{}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{}, "haproxy":{}, "backup":{"2.4.1":{"imagePath":"percona/percona-backup-mongodb:2.4.1", "imageHash":"a45d277af98090781a6149ccfb99d5bc4431ec53ba3b36ea644332851412a17e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.16.0":{"imagePath":"percona/percona-server-mongodb-operator:1.16.0", 
"imageHash":"30c92be499563a97e12e0542d3962c9cb10081f55722305a916eb98a229109c3", "imageHashArm64":"3fa8fffcf13f67526b0305ff5d048cdd2b5fd5fcb525ddbdf092b9548d93b302", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM_CLIENT=percona/pmm-client:2.41.2 ++ jq -r '.versions[].matrix.backup[].imagePath' ++ echo '{"versions":[{"product":"psmdb-operator", "operator":"1.16.0", "matrix":{"mongod":{"7.0.8-5":{"imagePath":"percona/percona-server-mongodb:7.0.8-5", "imageHash":"f81d1353d5497c5be36ee525f742d498ee6e1df9aba9502660c50f0fc98743b6", "imageHashArm64":"", "status":"recommended", "critical":false}}, "pxc":{}, "pmm":{"2.41.2":{"imagePath":"percona/pmm-client:2.41.2", "imageHash":"16d2499c1cbcc1af51bd3752fe7623b0d0a319ee128b12d41cadf8080d1ce56b", "imageHashArm64":"", "status":"recommended", "critical":false}}, "proxysql":{}, "haproxy":{}, "backup":{"2.4.1":{"imagePath":"percona/percona-backup-mongodb:2.4.1", "imageHash":"a45d277af98090781a6149ccfb99d5bc4431ec53ba3b36ea644332851412a17e", "imageHashArm64":"", "status":"recommended", "critical":false}}, "operator":{"1.16.0":{"imagePath":"percona/percona-server-mongodb-operator:1.16.0", "imageHash":"30c92be499563a97e12e0542d3962c9cb10081f55722305a916eb98a229109c3", "imageHashArm64":"3fa8fffcf13f67526b0305ff5d048cdd2b5fd5fcb525ddbdf092b9548d93b302", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_BACKUP=percona/percona-backup-mongodb:2.4.1 + [[ 1.17.0 == \1\.\1\6\.\0 ]] + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra_gh upgrade-sharded-3254 v1.16.0 + local ns=upgrade-sharded-3254 + local git_tag=v1.16.0 + check_crd_for_deletion v1.16.0 + local git_tag=v1.16.0 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hzrio0boej +++ mktemp ++ local LAST_ERR=/tmp/tmp.xcB2krN9lz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hzrio0boej ++ cat /tmp/tmp.xcB2krN9lz ++ rm /tmp/tmp.hzrio0boej /tmp/tmp.xcB2krN9lz ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in '$(curl -s 
https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D4ivsKZMMc +++ mktemp ++ local LAST_ERR=/tmp/tmp.AHElZzYdyj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D4ivsKZMMc ++ cat /tmp/tmp.AHElZzYdyj ++ rm /tmp/tmp.D4ivsKZMMc /tmp/tmp.AHElZzYdyj ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xBHxNzXjwU +++ mktemp ++ local LAST_ERR=/tmp/tmp.eGEmw89Cwz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xBHxNzXjwU ++ cat /tmp/tmp.eGEmw89Cwz ++ rm /tmp/tmp.xBHxNzXjwU /tmp/tmp.eGEmw89Cwz ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.zpxhHgNBsu + local LAST_OUT=/tmp/tmp.5o2nMccm2U ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.bbKXWwubMy + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.SEKlNLQjRz + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zpxhHgNBsu + cat /tmp/tmp.bbKXWwubMy + rm /tmp/tmp.zpxhHgNBsu /tmp/tmp.bbKXWwubMy + return 0 namespace "cert-manager" deleted namespace "upgrade-sharded-3416" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5o2nMccm2U namespace "psmdb-operator" deleted + cat /tmp/tmp.SEKlNLQjRz + rm /tmp/tmp.5o2nMccm2U /tmp/tmp.SEKlNLQjRz + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.4wDb4EEq35 ++ mktemp + local LAST_ERR=/tmp/tmp.al5ZT8FBf6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4wDb4EEq35 + cat /tmp/tmp.al5ZT8FBf6 + rm /tmp/tmp.4wDb4EEq35 /tmp/tmp.al5ZT8FBf6 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qYR3tIKccS ++ mktemp + local LAST_ERR=/tmp/tmp.KZ9FgsQuzz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qYR3tIKccS namespace/psmdb-operator created + cat /tmp/tmp.KZ9FgsQuzz + rm /tmp/tmp.qYR3tIKccS /tmp/tmp.KZ9FgsQuzz + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.LG2IUB1GJ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XznbTa9Ryn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LG2IUB1GJ4 ++ cat /tmp/tmp.XznbTa9Ryn ++ rm /tmp/tmp.LG2IUB1GJ4 /tmp/tmp.XznbTa9Ryn ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1559-6384b519-2-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dnl4uVgEcS ++ mktemp + local LAST_ERR=/tmp/tmp.YTm7dTOeJR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-1559-6384b519-2-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dnl4uVgEcS Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1559-6384b519-2-cluster2" modified. + cat /tmp/tmp.YTm7dTOeJR + rm /tmp/tmp.dnl4uVgEcS /tmp/tmp.YTm7dTOeJR + return 0 + deploy_operator_gh v1.16.0 + local git_tag=v1.16.0 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/crd.yaml --server-side ++ mktemp + local LAST_OUT=/tmp/tmp.sh15ee135n ++ mktemp + local LAST_ERR=/tmp/tmp.mcVblUCujR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/crd.yaml --server-side + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sh15ee135n customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.mcVblUCujR + rm /tmp/tmp.sh15ee135n /tmp/tmp.mcVblUCujR + return 0 + local rbac_yaml=rbac + local operator_yaml=operator + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.1z3suBnj5p ++ mktemp + local LAST_ERR=/tmp/tmp.TZLnhfYCCe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/cw-rbac.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1z3suBnj5p clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.TZLnhfYCCe + rm /tmp/tmp.1z3suBnj5p /tmp/tmp.TZLnhfYCCe + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/cw-operator.yaml + /usr/bin/sed -i -e 's^image: .*^image: perconalab\/percona-server-mongodb-operator:1.16.0^' /tmp/tmp.dUyAX9GJTK/cw-operator_v1.16.0.yaml + kubectl_bin apply -f /tmp/tmp.dUyAX9GJTK/cw-operator_v1.16.0.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.6lzawvoCLt ++ mktemp + local LAST_ERR=/tmp/tmp.66NG4nUjq3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.dUyAX9GJTK/cw-operator_v1.16.0.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6lzawvoCLt deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.66NG4nUjq3 + rm /tmp/tmp.6lzawvoCLt /tmp/tmp.66NG4nUjq3 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1RGE8TbnhO +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.kG3jjI8SBF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1RGE8TbnhO ++ cat /tmp/tmp.kG3jjI8SBF ++ rm /tmp/tmp.1RGE8TbnhO /tmp/tmp.kG3jjI8SBF ++ return 0 + wait_pod percona-server-mongodb-operator-5c5df5c469-6t9w4 + local pod=percona-server-mongodb-operator-5c5df5c469-6t9w4 + set +o xtrace waiting for pod/percona-server-mongodb-operator-5c5df5c469-6t9w4 to be ready.OK + create_namespace upgrade-sharded-3254 + local namespace=upgrade-sharded-3254 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-sharded-3254' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-sharded-3254 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-sharded-3254 --ignore-not-found + awk '{print$1}' ++ mktemp + kubectl_bin get ns ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.JC2qhTWP2B ++ mktemp + local LAST_OUT=/tmp/tmp.lEU7Ykz155 ++ mktemp + local LAST_ERR=/tmp/tmp.M3sK2Kv4wp + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.zPKzY5Dqod + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + for i in '$(seq 0 2)' + set +e + kubectl get ns + kubectl delete namespace 
upgrade-sharded-3254 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lEU7Ykz155 + cat /tmp/tmp.zPKzY5Dqod + rm /tmp/tmp.lEU7Ykz155 /tmp/tmp.zPKzY5Dqod + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JC2qhTWP2B + cat /tmp/tmp.M3sK2Kv4wp + rm /tmp/tmp.JC2qhTWP2B /tmp/tmp.M3sK2Kv4wp + return 0 + kubectl_bin wait --for=delete namespace upgrade-sharded-3254 ++ mktemp + local LAST_OUT=/tmp/tmp.NU4jKtoRPD ++ mktemp + local LAST_ERR=/tmp/tmp.3CqQMKxWau + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace upgrade-sharded-3254 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NU4jKtoRPD + cat /tmp/tmp.3CqQMKxWau + rm /tmp/tmp.NU4jKtoRPD /tmp/tmp.3CqQMKxWau + return 0 + desc 'create namespace upgrade-sharded-3254' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-sharded-3254 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-sharded-3254 ++ mktemp + local LAST_OUT=/tmp/tmp.DgQVtfzgWb ++ mktemp + local LAST_ERR=/tmp/tmp.F2483Q9AmR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-sharded-3254 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DgQVtfzgWb namespace/upgrade-sharded-3254 created + cat /tmp/tmp.F2483Q9AmR + rm /tmp/tmp.DgQVtfzgWb /tmp/tmp.F2483Q9AmR + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2b3HjsjTnf +++ mktemp ++ local LAST_ERR=/tmp/tmp.1VOMYYbvWd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2b3HjsjTnf ++ cat /tmp/tmp.1VOMYYbvWd ++ rm /tmp/tmp.2b3HjsjTnf /tmp/tmp.1VOMYYbvWd ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1559-6384b519-2-cluster2 --namespace=upgrade-sharded-3254 ++ mktemp + local LAST_OUT=/tmp/tmp.hQW153HdOz ++ mktemp + local LAST_ERR=/tmp/tmp.rPlKfmDpr5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1559-6384b519-2-cluster2 --namespace=upgrade-sharded-3254 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hQW153HdOz Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1559-6384b519-2-cluster2" modified. 
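The mktemp/LAST_OUT/LAST_ERR/seq pattern that repeats throughout this trace is the suite's kubectl_bin retry wrapper expanding under xtrace. A minimal sketch reconstructed from the trace alone — the literal '-n 1' guard is exactly what the trace shows; sleeping "$timeout" between attempts and replaying LAST_ERR onto stderr are assumptions:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                       # up to 3 attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"   # capture both streams per attempt
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 -a -n 1 ]; then  # '-n 1' is always true: retry only on failure
            sleep "$timeout"                      # assumed pause before the next attempt
        else
            break                                 # success: stop retrying
        fi
    done
    cat "$LAST_OUT"                               # replay captured stdout
    cat "$LAST_ERR" >&2                           # replay captured stderr (assumed fd)
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

This explains why every kubectl call in the log is bracketed by two mktemp calls, a set +e/set -e pair, two cat lines and an rm before "return 0".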
+ cat /tmp/tmp.rPlKfmDpr5 + rm /tmp/tmp.hQW153HdOz /tmp/tmp.rPlKfmDpr5 + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.Il4jIjuuoX ++ mktemp + local LAST_ERR=/tmp/tmp.4QMFjBZErh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Il4jIjuuoX namespace/cert-manager created + cat /tmp/tmp.4QMFjBZErh + rm /tmp/tmp.Il4jIjuuoX /tmp/tmp.4QMFjBZErh + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.52QgzBtijS ++ mktemp + local LAST_ERR=/tmp/tmp.Bz9BOfTRaS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.52QgzBtijS namespace/cert-manager labeled + cat /tmp/tmp.Bz9BOfTRaS + rm /tmp/tmp.52QgzBtijS /tmp/tmp.Bz9BOfTRaS + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.Tmjg7Ao0eK ++ mktemp + local LAST_ERR=/tmp/tmp.nHO4xl2XaV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tmjg7Ao0eK namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.nHO4xl2XaV Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
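Everything from the "deploy cert manager" banner down to the 120-second sleep just below is the suite's deploy_cert_manager step. Condensed from the surrounding trace, with the kubectl_bin retry wrapper elided for readability:

deploy_cert_manager() {
    kubectl create namespace cert-manager
    # cert-manager's own validating webhook is not serving yet, so validation
    # is disabled for the namespace before the manifest is applied
    kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
    kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false
    # wait for the controller, cainjector and webhook pods, then pause
    # (the trace shows a flat 120s sleep after the wait)
    kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
    sleep 120
}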
+ rm /tmp/tmp.Tmjg7Ao0eK /tmp/tmp.nHO4xl2XaV + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.zCdinwU3yb ++ mktemp + local LAST_ERR=/tmp/tmp.C9qXmB3LHz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zCdinwU3yb pod/cert-manager-5658d944df-5tl4c condition met pod/cert-manager-cainjector-cb99ff845-mshkk condition met pod/cert-manager-webhook-7fd74b8dc7-hlxnw condition met + cat /tmp/tmp.C9qXmB3LHz + rm /tmp/tmp.zCdinwU3yb /tmp/tmp.C9qXmB3LHz + return 0 + sleep 120 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4hebIbggwV ++ mktemp + local LAST_ERR=/tmp/tmp.Upf73YWWft + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4hebIbggwV secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.Upf73YWWft + rm /tmp/tmp.4hebIbggwV /tmp/tmp.Upf73YWWft + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Tue Jun 4 16:10:03 2024 NAMESPACE: upgrade-sharded-3254 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on 
the following DNS name from within your cluster: minio-service.upgrade-sharded-3254.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-sharded-3254 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-sharded-3254 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-sharded-3254 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-sharded-3254 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YUfQnIYgyX +++ mktemp ++ local LAST_ERR=/tmp/tmp.k6VKffGfRj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YUfQnIYgyX ++ cat /tmp/tmp.k6VKffGfRj ++ rm /tmp/tmp.YUfQnIYgyX /tmp/tmp.k6VKffGfRj ++ return 0 + MINIO_POD=minio-service-57dd49b-8swd9 + wait_pod minio-service-57dd49b-8swd9 + local pod=minio-service-57dd49b-8swd9 + set +o xtrace waiting for pod/minio-service-57dd49b-8swd9 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-3254.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.uWqLZQP65C ++ mktemp + local LAST_ERR=/tmp/tmp.JeD1vLpohE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-3254.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uWqLZQP65C service/minio-service created + cat /tmp/tmp.JeD1vLpohE + rm /tmp/tmp.uWqLZQP65C /tmp/tmp.JeD1vLpohE + return 0 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.rc9HPbRsLB ++ mktemp + local LAST_ERR=/tmp/tmp.qqUrFUpvI3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rc9HPbRsLB make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.qqUrFUpvI3 + rm /tmp/tmp.rc9HPbRsLB /tmp/tmp.qqUrFUpvI3 + return 0 + desc 'create secrets and start client' + set +o xtrace 
----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/secrets.yaml + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/conf/client.yml -f /tmp/tmp.dUyAX9GJTK/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.JDDRUw0ISx ++ mktemp + local LAST_ERR=/tmp/tmp.dt0nKHg5lO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/conf/client.yml -f /tmp/tmp.dUyAX9GJTK/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JDDRUw0ISx deployment.apps/psmdb-client created secret/my-cluster-name-secrets created + cat /tmp/tmp.dt0nKHg5lO + rm /tmp/tmp.JDDRUw0ISx /tmp/tmp.dt0nKHg5lO + return 0 + desc 'create first PSMDB cluster upgrade-sharded' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster upgrade-sharded ----------------------------------------------------------------------------------- + local cr_yaml=/tmp/tmp.dUyAX9GJTK/cr_v1.16.0.yaml + prepare_cr_yaml /tmp/tmp.dUyAX9GJTK/cr_v1.16.0.yaml + local cr_yaml=/tmp/tmp.dUyAX9GJTK/cr_v1.16.0.yaml + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.16.0/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-sharded" | .spec.upgradeOptions.apply = "disabled" | .spec.replsets[].size = 3 | .spec.replsets[].arbiter.enabled = false | .spec.backup.enabled = true | .spec.backup.tasks = [] | .spec.backup.pitr.enabled = false | .spec.backup.storages.minio.type = "s3" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service:9000/" | .spec.backup.storages.minio.s3.insecureSkipTLSVerify = false | .spec.sharding.enabled = true | .spec.sharding.configsvrReplSet.size = 3 | .spec.sharding.mongos.size = 3 | .spec.image="" | .spec.image tag="!!null" | .spec.backup.image = "-backup" | .spec.pmm.image = "-pmm"' + apply_cluster /tmp/tmp.dUyAX9GJTK/cr_v1.16.0.yaml + '[' -z '' ']' + cat_config /tmp/tmp.dUyAX9GJTK/cr_v1.16.0.yaml + kubectl_bin apply -f - + cat /tmp/tmp.dUyAX9GJTK/cr_v1.16.0.yaml ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(.image == null)).image = "percona/percona-server-mongodb:7.0.8-5"' + local LAST_OUT=/tmp/tmp.e98LwD2kLT + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.41.2"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.4.1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab\/percona-server-mongodb-operator:1.16.0"' ++ mktemp + local LAST_ERR=/tmp/tmp.8Nw749xwsf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e98LwD2kLT perconaservermongodb.psmdb.percona.com/upgrade-sharded created + cat /tmp/tmp.8Nw749xwsf + rm /tmp/tmp.e98LwD2kLT /tmp/tmp.8Nw749xwsf + return 0 + desc 'check if all Pods started' + set +o xtrace 
----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running upgrade-sharded-rs0 3 false + local name=upgrade-sharded-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-rs0-0 + local pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pod/upgrade-sharded-rs0-0 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-rs0-1 + local pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pod/upgrade-sharded-rs0-1 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.53u4MqRKp7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SqAfr8kOPk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.53u4MqRKp7 ++ cat /tmp/tmp.SqAfr8kOPk ++ rm /tmp/tmp.53u4MqRKp7 /tmp/tmp.SqAfr8kOPk ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-sharded-rs0-2 + local pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pod/upgrade-sharded-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BdJdDdtolL +++ mktemp ++ local LAST_ERR=/tmp/tmp.C3cTMbHzGF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BdJdDdtolL ++ cat /tmp/tmp.C3cTMbHzGF ++ rm /tmp/tmp.BdJdDdtolL /tmp/tmp.C3cTMbHzGF ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running upgrade-sharded-cfg 3 false + local name=upgrade-sharded-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-cfg-0 + local pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pod/upgrade-sharded-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-cfg-1 + local pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pod/upgrade-sharded-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K1ABmhs1hX +++ mktemp ++ local LAST_ERR=/tmp/tmp.r3t4QQyCPC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K1ABmhs1hX ++ cat /tmp/tmp.r3t4QQyCPC ++ rm /tmp/tmp.K1ABmhs1hX /tmp/tmp.r3t4QQyCPC ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod upgrade-sharded-cfg-2 + local 
pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pod/upgrade-sharded-cfg-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KJI6of0W7F +++ mktemp ++ local LAST_ERR=/tmp/tmp.nkt9O0spKA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KJI6of0W7F ++ cat /tmp/tmp.nkt9O0spKA ++ rm /tmp/tmp.KJI6of0W7F /tmp/tmp.nkt9O0spKA ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JVdzE54ydk +++ mktemp ++ local LAST_ERR=/tmp/tmp.gd4Vq8Mhvc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JVdzE54ydk ++ cat /tmp/tmp.gd4Vq8Mhvc ++ rm /tmp/tmp.JVdzE54ydk /tmp/tmp.gd4Vq8Mhvc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bGyC201doA +++ mktemp ++ local LAST_ERR=/tmp/tmp.WQ1bWM64ud ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bGyC201doA ++ cat /tmp/tmp.WQ1bWM64ud ++ rm /tmp/tmp.bGyC201doA /tmp/tmp.WQ1bWM64ud ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pC7GX8xZNw +++ mktemp ++ local LAST_ERR=/tmp/tmp.pSl5Mwb9TI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pC7GX8xZNw ++ cat /tmp/tmp.pSl5Mwb9TI ++ rm /tmp/tmp.pC7GX8xZNw /tmp/tmp.pSl5Mwb9TI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BYqgyiqNKx +++ mktemp ++ local LAST_ERR=/tmp/tmp.tTNADG0UIt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BYqgyiqNKx ++ cat /tmp/tmp.tTNADG0UIt ++ rm /tmp/tmp.BYqgyiqNKx /tmp/tmp.tTNADG0UIt ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3WFcgnecs7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DoGKk5L3W2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl 
get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3WFcgnecs7 ++ cat /tmp/tmp.DoGKk5L3W2 ++ rm /tmp/tmp.3WFcgnecs7 /tmp/tmp.DoGKk5L3W2 ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.es6EwHarDc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ObfMwfGRbc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.es6EwHarDc ++ cat /tmp/tmp.ObfMwfGRbc ++ rm /tmp/tmp.es6EwHarDc /tmp/tmp.ObfMwfGRbc ++ return 0 + [[ 3 == \3 ]] + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-3254 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-3254 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Pl2uofCREp +++ mktemp ++ local LAST_ERR=/tmp/tmp.4DnnUV0yuz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Pl2uofCREp ++ cat /tmp/tmp.4DnnUV0yuz ++ rm /tmp/tmp.Pl2uofCREp /tmp/tmp.4DnnUV0yuz ++ return 0 + local client_container=psmdb-client-7469665986-8pt6f + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8pt6f -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-3254.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Z2B36OZyAt ++ mktemp + local LAST_ERR=/tmp/tmp.MTmUvjiDy5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8pt6f -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-3254.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z2B36OZyAt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-sharded-mongos.upgrade-sharded-3254.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("bf098499-cccd-4b1f-b86f-a7a798d0fec6") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.MTmUvjiDy5 + rm /tmp/tmp.Z2B36OZyAt /tmp/tmp.MTmUvjiDy5 + return 0 + desc 'write data, read from all' + set +o xtrace 
----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-3254 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-3254 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5dWmKen5La +++ mktemp ++ local LAST_ERR=/tmp/tmp.pCsakOKfFg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5dWmKen5La ++ cat /tmp/tmp.pCsakOKfFg ++ rm /tmp/tmp.5dWmKen5La /tmp/tmp.pCsakOKfFg ++ return 0 + local client_container=psmdb-client-7469665986-8pt6f + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-8pt6f -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-3254.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4eDXe7yuLT ++ mktemp + local LAST_ERR=/tmp/tmp.pYwtwBEpUc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-8pt6f -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-3254.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4eDXe7yuLT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-sharded-mongos.upgrade-sharded-3254.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("f56558b1-7a39-4a68-a4f9-df1bbfa98236") } Percona Server for MongoDB server version: v7.0.8-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.pYwtwBEpUc + rm /tmp/tmp.4eDXe7yuLT /tmp/tmp.pYwtwBEpUc + return 0 + compare_generation 1 statefulset upgrade-sharded-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-sharded-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yp3l9CoHsu +++ mktemp ++ local LAST_ERR=/tmp/tmp.WV8Ku3dPAS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yp3l9CoHsu ++ cat /tmp/tmp.WV8Ku3dPAS ++ rm /tmp/tmp.Yp3l9CoHsu /tmp/tmp.WV8Ku3dPAS ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-cfg + local generation=1 + local resource=statefulset + local name=upgrade-sharded-cfg + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2esF3VQoWJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.RHRGEVJYI5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset 
upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2esF3VQoWJ ++ cat /tmp/tmp.RHRGEVJYI5 ++ rm /tmp/tmp.2esF3VQoWJ /tmp/tmp.RHRGEVJYI5 ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-mongos + local generation=1 + local resource=statefulset + local name=upgrade-sharded-mongos + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i3b4s2jaPz +++ mktemp ++ local LAST_ERR=/tmp/tmp.hbLYjPpHZL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i3b4s2jaPz ++ cat /tmp/tmp.hbLYjPpHZL ++ rm /tmp/tmp.i3b4s2jaPz /tmp/tmp.hbLYjPpHZL ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade-sharded + local generation=1 + local resource=psmdb + local name=upgrade-sharded + local current_generation ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZsuQx8AURU +++ mktemp ++ local LAST_ERR=/tmp/tmp.GReZVtOX8Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZsuQx8AURU ++ cat /tmp/tmp.GReZVtOX8Y ++ rm /tmp/tmp.ZsuQx8AURU /tmp/tmp.GReZVtOX8Y ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + backup_name_minio=backup-minio + desc 'create backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- create backup backup-minio ----------------------------------------------------------------------------------- + wait_backup_agent upgrade-sharded-rs0-0 + local agent_pod=upgrade-sharded-rs0-0 + set +o xtrace upgrade-sharded-rs0-0 + wait_backup_agent upgrade-sharded-rs0-1 + local agent_pod=upgrade-sharded-rs0-1 + set +o xtrace upgrade-sharded-rs0-1 + wait_backup_agent upgrade-sharded-rs0-2 + local agent_pod=upgrade-sharded-rs0-2 + set +o xtrace upgrade-sharded-rs0-2 + wait_backup_agent upgrade-sharded-cfg-0 + local agent_pod=upgrade-sharded-cfg-0 + set +o xtrace upgrade-sharded-cfg-0 + wait_backup_agent upgrade-sharded-cfg-1 + local agent_pod=upgrade-sharded-cfg-1 + set +o xtrace upgrade-sharded-cfg-1 + wait_backup_agent upgrade-sharded-cfg-2 + local agent_pod=upgrade-sharded-cfg-2 + set +o xtrace upgrade-sharded-cfg-2 + run_backup minio + local storage=minio + local backup_name=backup-minio + desc 'run backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/e2e-tests/upgrade-sharded/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.mhj9JUZMg0 ++ mktemp + local LAST_ERR=/tmp/tmp.mycUfQ6sCL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mhj9JUZMg0 
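# Editor's note: a minimal sketch of run_backup as exercised in this trace --
# render the backup manifest with yq, then pipe it to kubectl apply. The
# conf_dir variable is hypothetical; the log shows the job's absolute
# e2e-tests/upgrade-sharded/conf path instead.
#
# run_backup() {
#     local storage="$1"
#     local backup_name="backup-${storage}"
#     yq eval ".metadata.name = \"${backup_name}\" | .spec.storageName = \"${storage}\"" \
#         "${conf_dir}/backup-minio.yml" \
#         | kubectl apply -f -
# }
#
# (the kubectl apply output for the created backup object continues below)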
perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.mycUfQ6sCL + rm /tmp/tmp.mhj9JUZMg0 /tmp/tmp.mycUfQ6sCL + return 0 + wait_backup backup-minio + local backup_name=backup-minio + set +o xtrace backup-minio..................... + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.oM6JkCathW ++ mktemp + local LAST_ERR=/tmp/tmp.cCbVbTREeR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oM6JkCathW customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.cCbVbTREeR + rm /tmp/tmp.oM6JkCathW /tmp/tmp.cCbVbTREeR + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/deploy/cw-rbac.yaml -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.XaHaUCPzXk ++ mktemp + local LAST_ERR=/tmp/tmp.A7lA6WV6Af + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1559/deploy/cw-rbac.yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XaHaUCPzXk clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator unchanged clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.A7lA6WV6Af + rm /tmp/tmp.XaHaUCPzXk /tmp/tmp.A7lA6WV6Af + return 0 + desc 'use new image perconalab/percona-server-mongodb-operator:PR-1559-6384b519' + set +o xtrace ----------------------------------------------------------------------------------- use new image perconalab/percona-server-mongodb-operator:PR-1559-6384b519 ----------------------------------------------------------------------------------- + kubectl_bin patch deployment -n psmdb-operator percona-server-mongodb-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-1559-6384b519"}]}}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.gScFNUh66y ++ mktemp + local LAST_ERR=/tmp/tmp.S2byrlS4PQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch deployment -n psmdb-operator percona-server-mongodb-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-1559-6384b519"}]}}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gScFNUh66y deployment.apps/percona-server-mongodb-operator patched + cat /tmp/tmp.S2byrlS4PQ + rm /tmp/tmp.gScFNUh66y /tmp/tmp.S2byrlS4PQ + return 0 + kubectl_bin rollout status deployment/percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local 
LAST_OUT=/tmp/tmp.f6RfCwxHl6 ++ mktemp + local LAST_ERR=/tmp/tmp.HW9UAD8c6Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl rollout status deployment/percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f6RfCwxHl6 Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... deployment "percona-server-mongodb-operator" successfully rolled out + cat /tmp/tmp.HW9UAD8c6Z + rm /tmp/tmp.f6RfCwxHl6 /tmp/tmp.HW9UAD8c6Z + return 0 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- ++ kubectl_bin get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9ZSeWVVH44 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eaLJoB7jRj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9ZSeWVVH44 ++ cat /tmp/tmp.eaLJoB7jRj ++ rm /tmp/tmp.9ZSeWVVH44 /tmp/tmp.eaLJoB7jRj ++ return 0 + [[ 1 -eq 1 ]] + sleep 10 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-sharded-rs0 3 false + local name=upgrade-sharded-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-rs0-0 + local pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pod/upgrade-sharded-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-rs0-1 + local pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pod/upgrade-sharded-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LXufjEXtur +++ mktemp ++ local LAST_ERR=/tmp/tmp.DupWOXJZWp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LXufjEXtur ++ cat /tmp/tmp.DupWOXJZWp ++ rm /tmp/tmp.LXufjEXtur /tmp/tmp.DupWOXJZWp ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-sharded-rs0-2 + local pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pod/upgrade-sharded-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.89ilI5sG4Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.6aVi3iSCj9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.89ilI5sG4Q ++ cat /tmp/tmp.6aVi3iSCj9 ++ rm /tmp/tmp.89ilI5sG4Q /tmp/tmp.6aVi3iSCj9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running upgrade-sharded-cfg 3 false + local name=upgrade-sharded-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-cfg-0 + local pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pod/upgrade-sharded-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-cfg-1 + local pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pod/upgrade-sharded-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vgJ41rI4qK +++ mktemp ++ local LAST_ERR=/tmp/tmp.wO6vtAPXWV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vgJ41rI4qK ++ cat /tmp/tmp.wO6vtAPXWV ++ rm /tmp/tmp.vgJ41rI4qK /tmp/tmp.wO6vtAPXWV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod upgrade-sharded-cfg-2 + local pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pod/upgrade-sharded-cfg-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BvP2GbGtDV +++ mktemp ++ local LAST_ERR=/tmp/tmp.gKrwI87KBV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BvP2GbGtDV ++ cat /tmp/tmp.gKrwI87KBV ++ rm /tmp/tmp.BvP2GbGtDV /tmp/tmp.gKrwI87KBV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.spggJ0tYog +++ mktemp ++ local LAST_ERR=/tmp/tmp.pithYHyggz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.spggJ0tYog ++ cat /tmp/tmp.pithYHyggz ++ rm /tmp/tmp.spggJ0tYog /tmp/tmp.pithYHyggz ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KJUVjduc6i +++ mktemp ++ local LAST_ERR=/tmp/tmp.kF9B3xywLO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.KJUVjduc6i ++ cat /tmp/tmp.kF9B3xywLO ++ rm /tmp/tmp.KJUVjduc6i /tmp/tmp.kF9B3xywLO ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p4tsdOO4jV +++ mktemp ++ local LAST_ERR=/tmp/tmp.JNaj8SCKvo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p4tsdOO4jV ++ cat /tmp/tmp.JNaj8SCKvo ++ rm /tmp/tmp.p4tsdOO4jV /tmp/tmp.JNaj8SCKvo ++ return 0 + [[ 3 == \3 ]] + check_applied_images operator + local updated_image=operator + case "${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vncgYKTfvG +++ mktemp ++ local LAST_ERR=/tmp/tmp.b4gTesXxY7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vncgYKTfvG ++ cat /tmp/tmp.b4gTesXxY7 ++ rm /tmp/tmp.vncgYKTfvG /tmp/tmp.b4gTesXxY7 ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-1559-6384b519 == perconalab/percona-server-mongodb-operator:PR-1559-6384b519 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.14yCq5HGHF +++ mktemp ++ local LAST_ERR=/tmp/tmp.yJJRkjhLvi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.14yCq5HGHF ++ cat /tmp/tmp.yJJRkjhLvi ++ rm /tmp/tmp.14yCq5HGHF /tmp/tmp.yJJRkjhLvi ++ return 0 + [[ percona/percona-backup-mongodb:2.4.1 == percona/percona-backup-mongodb:2.4.1 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MMwCn9jYJS +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hnz8HQyKPW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MMwCn9jYJS ++ cat /tmp/tmp.Hnz8HQyKPW ++ rm /tmp/tmp.MMwCn9jYJS /tmp/tmp.Hnz8HQyKPW ++ return 0 + [[ percona/pmm-client:2.41.2 == percona/pmm-client:2.41.2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0pNQUtfkTI +++ mktemp ++ local LAST_ERR=/tmp/tmp.R3Xs76dttS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0pNQUtfkTI ++ cat /tmp/tmp.R3Xs76dttS ++ rm /tmp/tmp.0pNQUtfkTI /tmp/tmp.R3Xs76dttS ++ return 0 + [[ percona/percona-server-mongodb:7.0.8-5 == percona/percona-server-mongodb:7.0.8-5 ]] + : Operator image has been updated correctly + compare_generation 1 statefulset upgrade-sharded-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-sharded-rs0 + 
local current_generation ++ kubectl_bin get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xGolDkAe2K +++ mktemp ++ local LAST_ERR=/tmp/tmp.QxztgyU9sL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xGolDkAe2K ++ cat /tmp/tmp.QxztgyU9sL ++ rm /tmp/tmp.xGolDkAe2K /tmp/tmp.QxztgyU9sL ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-cfg + local generation=1 + local resource=statefulset + local name=upgrade-sharded-cfg + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yyeEKEKMFa +++ mktemp ++ local LAST_ERR=/tmp/tmp.jX0olACX1i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yyeEKEKMFa ++ cat /tmp/tmp.jX0olACX1i ++ rm /tmp/tmp.yyeEKEKMFa /tmp/tmp.jX0olACX1i ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-mongos + local generation=1 + local resource=statefulset + local name=upgrade-sharded-mongos + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wjhRWoviUw +++ mktemp ++ local LAST_ERR=/tmp/tmp.puOmsvqkn3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wjhRWoviUw ++ cat /tmp/tmp.puOmsvqkn3 ++ rm /tmp/tmp.wjhRWoviUw /tmp/tmp.puOmsvqkn3 ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade-sharded + local generation=1 + local resource=psmdb + local name=upgrade-sharded + local current_generation ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TqXI7i8euO +++ mktemp ++ local LAST_ERR=/tmp/tmp.leA26OQsMr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TqXI7i8euO ++ cat /tmp/tmp.leA26OQsMr ++ rm /tmp/tmp.TqXI7i8euO /tmp/tmp.leA26OQsMr ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch psmdb images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch psmdb images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb upgrade-sharded --type=merge --patch '{ "spec": { "crVersion": "1.17.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.hboTAjkVBU ++ mktemp + local LAST_ERR=/tmp/tmp.czZy7R7LB0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb 
upgrade-sharded --type=merge --patch '{ "spec": { "crVersion": "1.17.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hboTAjkVBU perconaservermongodb.psmdb.percona.com/upgrade-sharded patched + cat /tmp/tmp.czZy7R7LB0 + rm /tmp/tmp.hboTAjkVBU /tmp/tmp.czZy7R7LB0 + return 0 + sleep 10 + desc 'check cluster after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check cluster after full upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-sharded-rs0 3 false + local name=upgrade-sharded-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-rs0-0 + local pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pod/upgrade-sharded-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-rs0-1 + local pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pod/upgrade-sharded-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AWNmR1RPva +++ mktemp ++ local LAST_ERR=/tmp/tmp.4weySA4ET3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AWNmR1RPva ++ cat /tmp/tmp.4weySA4ET3 ++ rm /tmp/tmp.AWNmR1RPva /tmp/tmp.4weySA4ET3 ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-sharded-rs0-2 + local pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pod/upgrade-sharded-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZbD6Pk9Jrj +++ mktemp ++ local LAST_ERR=/tmp/tmp.4DJshMpj2F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZbD6Pk9Jrj ++ cat /tmp/tmp.4DJshMpj2F ++ rm /tmp/tmp.ZbD6Pk9Jrj /tmp/tmp.4DJshMpj2F ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running upgrade-sharded-cfg 3 false + local name=upgrade-sharded-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-cfg-0 + local pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pod/upgrade-sharded-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-cfg-1 + local pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pod/upgrade-sharded-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UG7TM6Op2v +++ mktemp ++ local LAST_ERR=/tmp/tmp.w6vK1W8MZx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UG7TM6Op2v ++ cat /tmp/tmp.w6vK1W8MZx ++ rm /tmp/tmp.UG7TM6Op2v /tmp/tmp.w6vK1W8MZx ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod upgrade-sharded-cfg-2 + local pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pod/upgrade-sharded-cfg-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SQ61suzJQl +++ mktemp ++ local LAST_ERR=/tmp/tmp.gLuwVzugd0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SQ61suzJQl ++ cat /tmp/tmp.gLuwVzugd0 ++ rm /tmp/tmp.SQ61suzJQl /tmp/tmp.gLuwVzugd0 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mIx3J4MRHt +++ mktemp ++ local LAST_ERR=/tmp/tmp.KxDVMSwniN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mIx3J4MRHt ++ cat /tmp/tmp.KxDVMSwniN ++ rm /tmp/tmp.mIx3J4MRHt /tmp/tmp.KxDVMSwniN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fmu62uFrYR +++ mktemp ++ local LAST_ERR=/tmp/tmp.ffAexIE3lR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fmu62uFrYR ++ cat /tmp/tmp.ffAexIE3lR ++ rm /tmp/tmp.fmu62uFrYR /tmp/tmp.ffAexIE3lR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q1j9QrlP4q +++ mktemp ++ local LAST_ERR=/tmp/tmp.0Sjtk1eAYf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q1j9QrlP4q ++ cat /tmp/tmp.0Sjtk1eAYf ++ rm /tmp/tmp.Q1j9QrlP4q /tmp/tmp.0Sjtk1eAYf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vHMTaFjSRV +++ mktemp ++ local LAST_ERR=/tmp/tmp.HdR72iXy47 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 
++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vHMTaFjSRV ++ cat /tmp/tmp.HdR72iXy47 ++ rm /tmp/tmp.vHMTaFjSRV /tmp/tmp.HdR72iXy47 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20
[identical kubectl_bin polling block repeated once per retry, 20s apart: retries 5-11 returned .status.state=initializing, retries 12-31 returned .status.state=error]
++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oTM4b9gCmn +++ mktemp ++ local LAST_ERR=/tmp/tmp.ckhytefAXJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oTM4b9gCmn ++ cat /tmp/tmp.ckhytefAXJ ++ rm /tmp/tmp.oTM4b9gCmn /tmp/tmp.ckhytefAXJ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 32 -ge 32 ']' + echo max retry count 32 reached. something went wrong with operator or kubernetes cluster max retry count 32 reached. something went wrong with operator or kubernetes cluster + exit 1
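
Editor's note: the exit 1 above is wait_cluster_consistency giving up after 32 polls 20s apart (roughly 10.5 minutes) while the PerconaServerMongoDB resource sat in "error" following the crVersion 1.17.0 image patch. A minimal sketch of the loop, reconstructed from this trace (the real helper also verifies .status.replsets.rs0.ready and .status.replsets.cfg.ready equal CLUSTER_SIZE once the state turns ready, as seen earlier in the log):

wait_cluster_consistency() {
    local cluster="$1"
    local retry=0
    # poll the CR state until "ready", failing hard after 32 attempts
    while [[ $(kubectl get psmdb "$cluster" \
            -o 'jsonpath={.status.state}') != "ready" ]]; do
        let retry+=1
        if [ "$retry" -ge 32 ]; then
            echo "max retry count 32 reached. something went wrong with operator or kubernetes cluster"
            exit 1
        fi
        echo 'waiting for cluster readyness'
        sleep 20
    done
}

Every kubectl call in this log is routed through the suite's kubectl_bin wrapper, whose mktemp/cat/rm plumbing accounts for most of the trace noise. A sketch inferred from the repeated pattern (the back-off between failed attempts is an assumption; the trace only shows successful first attempts):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                       # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ $exit_status -eq 0 ]; then
            break                                 # success: stop retrying
        fi
        sleep "$timeout"                          # assumed pause between tries
    done
    cat "$LAST_OUT"                               # replay captured stdout
    cat "$LAST_ERR"                               # replay captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}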