Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/logs/upgrade-sharded.log
grep: warning: stray \ before -
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
+ cluster=upgrade-sharded
+ CLUSTER_SIZE=3
+ TARGET_OPERATOR_VER=1.22.0
+ TARGET_IMAGE=perconalab/percona-server-mongodb-operator:PR-2151-135050b2
+ TARGET_IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod8.0
+ TARGET_IMAGE_PMM_CLIENT=percona/pmm-client:2.44.1-1
+ TARGET_IMAGE_BACKUP=perconalab/percona-server-mongodb-operator:main-backup
++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod8.0
++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0
+++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version'
+++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0
+++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
+++ local 'cli=mongod --version'
+++ local pod_name=24104
+++ kubectl_bin -n default run 24104 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.0FXVuFYyHM
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.aah1s8toK6
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl -n default run 24104 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.0FXVuFYyHM
+++ cat /tmp/tmp.aah1s8toK6
+++ rm /tmp/tmp.0FXVuFYyHM /tmp/tmp.aah1s8toK6
+++ return 0
+++ kubectl_bin -n default wait --for=condition=Ready pod/24104
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.OVz87jQBin
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.krtLILSzSo
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl -n default wait --for=condition=Ready pod/24104
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.OVz87jQBin
+++ cat /tmp/tmp.krtLILSzSo
+++ rm /tmp/tmp.OVz87jQBin /tmp/tmp.krtLILSzSo
+++ return 0
++++ kubectl_bin -n default exec 24104 -- bash -c 'mongod --version 2>&1'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.UOvyKWmSIP
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.DgTaNdliXH
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in $(seq 0 2)
++++ set +e
++++ kubectl -n default exec 24104 -- bash -c 'mongod --version 2>&1'
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.UOvyKWmSIP
++++ cat /tmp/tmp.DgTaNdliXH
++++ rm /tmp/tmp.UOvyKWmSIP /tmp/tmp.DgTaNdliXH
++++ return 0
+++ local 'output=db version v8.0.16-5
Build Info: {
    "version": "8.0.16-5",
    "gitVersion": "f174b291665d4de5c104d07bff06eabe7a913868",
    "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025",
    "modules": [],
    "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ],
    "allocator": "tcmalloc-google",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}'
+++ kubectl_bin -n default delete pod/24104 --grace-period=0 --force
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.NuHpg8VOTL
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.6A2waYSZqA
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl -n default delete pod/24104 --grace-period=0 --force
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.NuHpg8VOTL
+++ cat /tmp/tmp.6A2waYSZqA
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+++ rm /tmp/tmp.NuHpg8VOTL /tmp/tmp.6A2waYSZqA
+++ return 0
+++ echo db version v8.0.16-5 Build Info: '{' '"version":' '"8.0.16-5",' '"gitVersion":' '"f174b291665d4de5c104d07bff06eabe7a913868",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}'
++ version_info=8.0.16-5
++ [[ ! 8.0.16-5 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]]
++ echo 8.0.16-5
+ FULL_VER=8.0.16-5
+ MONGO_VER=8.0
++ curl -s https://check.percona.com/versions/v1/psmdb-operator
++ jq -r '.versions[].operator'
++ sort -V
++ tail -n1
+ INIT_OPERATOR_VER=1.21.1
+ [[ 1.21.1 == \1\.\2\2\.\0 ]]
+ GIT_TAG=v1.21.1
+ case $(curl -s -o /dev/null -w "%{http_code}" 'https://check.percona.com/versions/v1/psmdb-operator/'${INIT_OPERATOR_VER}/'latest?databaseVersion='${MONGO_VER}'') in
++ curl -s -o /dev/null -w '%{http_code}' 'https://check.percona.com/versions/v1/psmdb-operator/1.21.1/latest?databaseVersion=8.0'
++ curl -s 'https://check.percona.com/versions/v1/psmdb-operator/1.21.1/latest?databaseVersion=8.0'
+
INIT_OPERATOR_IMAGES='{"versions":[{"product":"psmdb-operator","operator":"1.21.1","matrix":{"mongod":{"8.0.12-4":{"imagePath":"percona/percona-server-mongodb:8.0.12-4","imageHash":"ab8793879409788b5a19f7e332a3700520e8eeaf4b068ec8cc7d1b680f097307","imageHashArm64":"d367e225b57783bc2ff8451571c7568dc3b240176cf149a01cc3a7b13fb52a78","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"available","critical":false},"3.4.1":{"imagePath":"percona/pmm-client:3.4.1","imageHash":"1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.1":{"imagePath":"percona/percona-server-mongodb-operator:1.21.1","imageHash":"155f6ee71dcfc52ff30ed4e2c4396fc3d3534c83b4794de4d90c79542fbb0e34","imageHashArm64":"88926b82a5551c36592d1c83b2e80d3c3560f0809cdb7b5d6648038123b65097","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + OPERATOR_NAME=percona-server-mongodb-operator ++ jq -r '.versions[].matrix.operator[].imagePath' ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.1","matrix":{"mongod":{"8.0.12-4":{"imagePath":"percona/percona-server-mongodb:8.0.12-4","imageHash":"ab8793879409788b5a19f7e332a3700520e8eeaf4b068ec8cc7d1b680f097307","imageHashArm64":"d367e225b57783bc2ff8451571c7568dc3b240176cf149a01cc3a7b13fb52a78","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"available","critical":false},"3.4.1":{"imagePath":"percona/pmm-client:3.4.1","imageHash":"1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.1":{"imagePath":"percona/percona-server-mongodb-operator:1.21.1","imageHash":"155f6ee71dcfc52ff30ed4e2c4396fc3d3534c83b4794de4d90c79542fbb0e34","imageHashArm64":"88926b82a5551c36592d1c83b2e80d3c3560f0809cdb7b5d6648038123b65097","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + IMAGE=percona/percona-server-mongodb-operator:1.21.1 ++ echo perconalab/percona-server-mongodb-operator:PR-2151-135050b2 ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-server-mongodb-operator:1.21.1 ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.1","matrix":{"mongod":{"8.0.12-4":{"imagePath":"percona/percona-server-mongodb:8.0.12-4","imageHash":"ab8793879409788b5a19f7e332a3700520e8eeaf4b068ec8cc7d1b680f097307","imageHashArm64":"d367e225b57783bc2ff8451571c7568dc3b240176cf149a01cc3a7b13fb52a78","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"available","critical":false},"3.4.1":{"imagePath":"percona/pmm-client:3.4.1","imageHash":"1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.1":{"imagePath":"percona/percona-server-mongodb-operator:1.21.1","imageHash":"155f6ee71dcfc52ff30ed4e2c4396fc3d3534c83b4794de4d90c79542fbb0e34","imageHashArm64":"88926b82a5551c36592d1c83b2e80d3c3560f0809cdb7b5d6648038123b65097","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.mongod[].imagePath' + IMAGE_MONGOD=percona/percona-server-mongodb:8.0.12-4 ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.1","matrix":{"mongod":{"8.0.12-4":{"imagePath":"percona/percona-server-mongodb:8.0.12-4","imageHash":"ab8793879409788b5a19f7e332a3700520e8eeaf4b068ec8cc7d1b680f097307","imageHashArm64":"d367e225b57783bc2ff8451571c7568dc3b240176cf149a01cc3a7b13fb52a78","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"available","critical":false},"3.4.1":{"imagePath":"percona/pmm-client:3.4.1","imageHash":"1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.1":{"imagePath":"percona/percona-server-mongodb-operator:1.21.1","imageHash":"155f6ee71dcfc52ff30ed4e2c4396fc3d3534c83b4794de4d90c79542fbb0e34","imageHashArm64":"88926b82a5551c36592d1c83b2e80d3c3560f0809cdb7b5d6648038123b65097","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM_CLIENT='percona/pmm-client:2.44.1-1 percona/pmm-client:3.4.1' ++ jq -r '.versions[].matrix.backup[].imagePath' ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.1","matrix":{"mongod":{"8.0.12-4":{"imagePath":"percona/percona-server-mongodb:8.0.12-4","imageHash":"ab8793879409788b5a19f7e332a3700520e8eeaf4b068ec8cc7d1b680f097307","imageHashArm64":"d367e225b57783bc2ff8451571c7568dc3b240176cf149a01cc3a7b13fb52a78","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"","status":"available","critical":false},"3.4.1":{"imagePath":"percona/pmm-client:3.4.1","imageHash":"1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b","imageHashArm64":"","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.1":{"imagePath":"percona/percona-server-mongodb-operator:1.21.1","imageHash":"155f6ee71dcfc52ff30ed4e2c4396fc3d3534c83b4794de4d90c79542fbb0e34","imageHashArm64":"88926b82a5551c36592d1c83b2e80d3c3560f0809cdb7b5d6648038123b65097","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + IMAGE_BACKUP=percona/percona-backup-mongodb:2.11.0 + [[ 1.22.0 == \1\.\2\1\.\1 ]] + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra_gh upgrade-sharded-715 v1.21.1 + local ns=upgrade-sharded-715 + local git_tag=v1.21.1 + check_crd_for_deletion v1.21.1 + local git_tag=v1.21.1 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CupKyErYGH +++ mktemp ++ local LAST_ERR=/tmp/tmp.cXblOPlyRF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CupKyErYGH ++ cat /tmp/tmp.cXblOPlyRF ++ rm /tmp/tmp.CupKyErYGH /tmp/tmp.cXblOPlyRF ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ 
local LAST_OUT=/tmp/tmp.Uyib4Tnxsi +++ mktemp ++ local LAST_ERR=/tmp/tmp.1YIQFBAia6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uyib4Tnxsi ++ cat /tmp/tmp.1YIQFBAia6 ++ rm /tmp/tmp.Uyib4Tnxsi /tmp/tmp.1YIQFBAia6 ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YMPwkl13BL +++ mktemp ++ local LAST_ERR=/tmp/tmp.ApVpD6A40U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YMPwkl13BL ++ cat /tmp/tmp.ApVpD6A40U ++ rm /tmp/tmp.YMPwkl13BL /tmp/tmp.ApVpD6A40U ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o 
xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.oYG8T51tHs + local LAST_OUT=/tmp/tmp.WUraC1xwQz ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.AlKz2H13EO + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.GnDFCE0GYK + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oYG8T51tHs + cat /tmp/tmp.AlKz2H13EO + rm /tmp/tmp.oYG8T51tHs /tmp/tmp.AlKz2H13EO + return 0 namespace "cert-manager" deleted namespace "upgrade-sharded-9124" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WUraC1xwQz namespace "psmdb-operator" deleted + cat /tmp/tmp.GnDFCE0GYK + rm /tmp/tmp.WUraC1xwQz /tmp/tmp.GnDFCE0GYK + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.EOjblhXjIL ++ mktemp + local LAST_ERR=/tmp/tmp.GAS1cZTOeI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EOjblhXjIL + cat /tmp/tmp.GAS1cZTOeI + rm /tmp/tmp.EOjblhXjIL /tmp/tmp.GAS1cZTOeI + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WIm9aE3fAl ++ mktemp + local LAST_ERR=/tmp/tmp.N8aC8xIFtH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WIm9aE3fAl namespace/psmdb-operator created + cat /tmp/tmp.N8aC8xIFtH + rm /tmp/tmp.WIm9aE3fAl /tmp/tmp.N8aC8xIFtH + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.XYaEJixHFM +++ mktemp ++ local LAST_ERR=/tmp/tmp.zWYNlefM83 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XYaEJixHFM ++ cat /tmp/tmp.zWYNlefM83 ++ rm /tmp/tmp.XYaEJixHFM /tmp/tmp.zWYNlefM83 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2151-135050b2-3-cluster7 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ESdOHawn1X ++ mktemp + local LAST_ERR=/tmp/tmp.eoMQZNSc3N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2151-135050b2-3-cluster7 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ESdOHawn1X Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2151-135050b2-3-cluster7" modified. 
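# NOTE: the mktemp/LAST_OUT/LAST_ERR/seq 0 2 ceremony that repeats throughout
# this trace comes from the test framework's kubectl_bin wrapper. A minimal
# sketch of the pattern, reconstructed from the trace (the three-attempt loop,
# the LAST_OUT/LAST_ERR temp files, and timeout=4 are visible above; the sleep
# between failed attempts is an assumption):
#
#   kubectl_bin() {
#       local LAST_OUT LAST_ERR exit_status=0 timeout=4
#       LAST_OUT=$(mktemp)
#       LAST_ERR=$(mktemp)
#       for i in $(seq 0 2); do                  # up to 3 attempts
#           set +e
#           kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
#           exit_status=$?
#           set -e
#           [ $exit_status -eq 0 ] && break
#           sleep "$timeout"                     # assumed back-off between retries
#       done
#       cat "$LAST_OUT"                          # replay captured stdout
#       cat "$LAST_ERR"                          # replay captured stderr
#       rm "$LAST_OUT" "$LAST_ERR"
#       return $exit_status
#   }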
+ cat /tmp/tmp.eoMQZNSc3N + rm /tmp/tmp.ESdOHawn1X /tmp/tmp.eoMQZNSc3N + return 0 + deploy_operator_gh v1.21.1 + local git_tag=v1.21.1 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/crd.yaml --server-side ++ mktemp + local LAST_OUT=/tmp/tmp.oBXemPzFCs ++ mktemp + local LAST_ERR=/tmp/tmp.52dH2hS1x7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/crd.yaml --server-side + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oBXemPzFCs customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.52dH2hS1x7 + rm /tmp/tmp.oBXemPzFCs /tmp/tmp.52dH2hS1x7 + return 0 + local rbac_yaml=rbac + local operator_yaml=operator + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.nitIXrMve3 ++ mktemp + local LAST_ERR=/tmp/tmp.PC2NH322PS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/cw-rbac.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nitIXrMve3 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.PC2NH322PS + rm /tmp/tmp.nitIXrMve3 /tmp/tmp.PC2NH322PS + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:1.21.1") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /tmp/tmp.HyXxiG9f2c/cw-operator_v1.21.1.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.yyFMAAc3f4 ++ mktemp + local LAST_ERR=/tmp/tmp.J6N3CuqBbq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yyFMAAc3f4 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.J6N3CuqBbq + rm /tmp/tmp.yyFMAAc3f4 /tmp/tmp.J6N3CuqBbq + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.507BN7aifV +++ mktemp ++ local LAST_ERR=/tmp/tmp.Orsak8BbZC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.507BN7aifV ++ cat /tmp/tmp.Orsak8BbZC ++ rm /tmp/tmp.507BN7aifV /tmp/tmp.Orsak8BbZC ++ return 0 + wait_operator_pod percona-server-mongodb-operator-f7fd94694-hwrgt + local pod=percona-server-mongodb-operator-f7fd94694-hwrgt + set +o xtrace waiting for pod/percona-server-mongodb-operator-f7fd94694-hwrgt to be ready..OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.uJhO4hpWiI +++ mktemp ++ local LAST_ERR=/tmp/tmp.b6Uclpr9Gb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uJhO4hpWiI ++ cat /tmp/tmp.b6Uclpr9Gb ++ rm /tmp/tmp.uJhO4hpWiI /tmp/tmp.b6Uclpr9Gb ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-f7fd94694-hwrgt ++ mktemp + local LAST_OUT=/tmp/tmp.Kh5RWBoHTP ++ mktemp + local LAST_ERR=/tmp/tmp.d1msP67Q9a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-f7fd94694-hwrgt + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kh5RWBoHTP + cat /tmp/tmp.d1msP67Q9a + rm /tmp/tmp.Kh5RWBoHTP /tmp/tmp.d1msP67Q9a + return 0 2025-12-17T15:39:26.964Z INFO setup Manager starting up {"gitCommit": "1edf59995f7612a767b58c646eaead9ff1fe25ba", "gitBranch": "release-1-21-1", "buildTime": "", "goVersion": "go1.25.3", "os": "linux", "arch": "amd64"} + create_namespace upgrade-sharded-715 + local namespace=upgrade-sharded-715 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: 
resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-sharded-715' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-sharded-715 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-sharded-715 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.oxtRa8T6DL egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.E5pxgtjhn0 + local LAST_ERR=/tmp/tmp.ZN9GwAq5uK + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.ZhU1amU91c + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace upgrade-sharded-715 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oxtRa8T6DL + cat /tmp/tmp.ZN9GwAq5uK + rm /tmp/tmp.oxtRa8T6DL /tmp/tmp.ZN9GwAq5uK + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E5pxgtjhn0 + cat /tmp/tmp.ZhU1amU91c + rm /tmp/tmp.E5pxgtjhn0 /tmp/tmp.ZhU1amU91c + return 0 + kubectl_bin wait --for=delete namespace upgrade-sharded-715 ++ mktemp + local LAST_OUT=/tmp/tmp.TiLpPLKHnw ++ mktemp + local LAST_ERR=/tmp/tmp.nEBEowoNzh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace upgrade-sharded-715 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TiLpPLKHnw + cat /tmp/tmp.nEBEowoNzh + rm /tmp/tmp.TiLpPLKHnw /tmp/tmp.nEBEowoNzh + return 0 + desc 'create namespace upgrade-sharded-715' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-sharded-715 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-sharded-715 ++ mktemp + local LAST_OUT=/tmp/tmp.zgDYjP1C7k ++ mktemp + local 
LAST_ERR=/tmp/tmp.byd3EMzlSG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace upgrade-sharded-715 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zgDYjP1C7k namespace/upgrade-sharded-715 created + cat /tmp/tmp.byd3EMzlSG + rm /tmp/tmp.zgDYjP1C7k /tmp/tmp.byd3EMzlSG + return 0 + set_kube_ctx upgrade-sharded-715 + local namespace=upgrade-sharded-715 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.f3C3JrIWC1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZUf7E6x6Bm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f3C3JrIWC1 ++ cat /tmp/tmp.ZUf7E6x6Bm ++ rm /tmp/tmp.f3C3JrIWC1 /tmp/tmp.ZUf7E6x6Bm ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2151-135050b2-3-cluster7 --namespace=upgrade-sharded-715 ++ mktemp + local LAST_OUT=/tmp/tmp.nUb4Zt514E ++ mktemp + local LAST_ERR=/tmp/tmp.0IGTLxZaH6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2151-135050b2-3-cluster7 --namespace=upgrade-sharded-715 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nUb4Zt514E Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2151-135050b2-3-cluster7" modified. + cat /tmp/tmp.0IGTLxZaH6 + rm /tmp/tmp.nUb4Zt514E /tmp/tmp.0IGTLxZaH6 + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.flBFhsBLET ++ mktemp + local LAST_ERR=/tmp/tmp.QDpAXAaEb0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.flBFhsBLET namespace/cert-manager created + cat /tmp/tmp.QDpAXAaEb0 + rm /tmp/tmp.flBFhsBLET /tmp/tmp.QDpAXAaEb0 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.y0s4Zty606 ++ mktemp + local LAST_ERR=/tmp/tmp.30Dz7orhN9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y0s4Zty606 namespace/cert-manager labeled + cat /tmp/tmp.30Dz7orhN9 + rm /tmp/tmp.y0s4Zty606 /tmp/tmp.30Dz7orhN9 + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.CBpzEZIrVC ++ mktemp + local LAST_ERR=/tmp/tmp.bD1psDXayB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CBpzEZIrVC namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.bD1psDXayB Warning: resource namespaces/cert-manager 
is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.CBpzEZIrVC /tmp/tmp.bD1psDXayB + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.hNK96bhgeG ++ mktemp + local LAST_ERR=/tmp/tmp.7ivNHq7Z1Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hNK96bhgeG pod/cert-manager-cainjector-5dc9c8b4f7-js644 condition met pod/cert-manager-df4b69479-7btqw condition met pod/cert-manager-webhook-769bbb594d-kdqmg condition met + cat /tmp/tmp.7ivNHq7Z1Y + rm /tmp/tmp.hNK96bhgeG /tmp/tmp.7ivNHq7Z1Y + return 0 + sleep 120 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.R8kbU3rk7a ++ mktemp + local LAST_ERR=/tmp/tmp.Czf7h191zg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R8kbU3rk7a secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.Czf7h191zg + rm /tmp/tmp.R8kbU3rk7a /tmp/tmp.Czf7h191zg + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 
'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Wed Dec 17 15:42:13 2025 NAMESPACE: upgrade-sharded-715 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-sharded-715.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-sharded-715 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-sharded-715 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-sharded-715 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-sharded-715 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H1AO4L4Usa +++ mktemp ++ local LAST_ERR=/tmp/tmp.1yoOJ5amo2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H1AO4L4Usa ++ cat /tmp/tmp.1yoOJ5amo2 ++ rm /tmp/tmp.H1AO4L4Usa /tmp/tmp.1yoOJ5amo2 ++ return 0 + MINIO_POD=minio-service-d9589b474-2kh4p + wait_pod minio-service-d9589b474-2kh4p + local pod=minio-service-d9589b474-2kh4p + set +o xtrace waiting for pod/minio-service-d9589b474-2kh4p to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-715.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.ZQN4XOSENo ++ mktemp + local LAST_ERR=/tmp/tmp.vUgX9UhgCC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-715.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZQN4XOSENo service/minio-service created + cat /tmp/tmp.vUgX9UhgCC + rm /tmp/tmp.ZQN4XOSENo /tmp/tmp.vUgX9UhgCC + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.ano28UHTSJ ++ mktemp + local LAST_ERR=/tmp/tmp.mgnqqOceXe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url 
http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ano28UHTSJ pod "aws-cli" deleted from upgrade-sharded-715 namespace + cat /tmp/tmp.mgnqqOceXe All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.ano28UHTSJ /tmp/tmp.mgnqqOceXe + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/secrets.yaml + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/conf/client.yml -f /tmp/tmp.HyXxiG9f2c/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Ks0Lt9Eg0m ++ mktemp + local LAST_ERR=/tmp/tmp.IvRONwJJd5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/conf/client.yml -f /tmp/tmp.HyXxiG9f2c/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ks0Lt9Eg0m deployment.apps/psmdb-client created secret/my-cluster-name-secrets created + cat /tmp/tmp.IvRONwJJd5 + rm /tmp/tmp.Ks0Lt9Eg0m /tmp/tmp.IvRONwJJd5 + return 0 + desc 'create first PSMDB cluster upgrade-sharded' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster upgrade-sharded ----------------------------------------------------------------------------------- + local cr_yaml=/tmp/tmp.HyXxiG9f2c/cr_v1.21.1.yaml + prepare_cr_yaml /tmp/tmp.HyXxiG9f2c/cr_v1.21.1.yaml + local cr_yaml=/tmp/tmp.HyXxiG9f2c/cr_v1.21.1.yaml + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade-sharded" | .spec.upgradeOptions.apply = "disabled" | .spec.replsets[].size = 3 | .spec.replsets[].arbiter.enabled = false | .spec.backup.enabled = true | .spec.backup.tasks = [] | .spec.backup.pitr.enabled = false | .spec.backup.storages.minio.type = "s3" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service:9000/" | .spec.backup.storages.minio.s3.insecureSkipTLSVerify = false | .spec.sharding.enabled = true | .spec.sharding.configsvrReplSet.size = 3 | .spec.sharding.mongos.size = 3 | .spec.image="" | .spec.image tag="!!null" | .spec.backup.image = "-backup" | .spec.pmm.image = "-pmm"' + apply_cluster /tmp/tmp.HyXxiG9f2c/cr_v1.21.1.yaml + '[' -z '' ']' + cat_config /tmp/tmp.HyXxiG9f2c/cr_v1.21.1.yaml + kubectl_bin apply -f - + cat /tmp/tmp.HyXxiG9f2c/cr_v1.21.1.yaml + yq eval '(.spec | select(.image == null)).image = "percona/percona-server-mongodb:8.0.12-4"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1 percona/pmm-client:3.4.1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:1.21.1"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' + yq eval 
'.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.pLRox3qrDQ ++ mktemp + local LAST_ERR=/tmp/tmp.2nHl14k4EJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pLRox3qrDQ perconaservermongodb.psmdb.percona.com/upgrade-sharded created + cat /tmp/tmp.2nHl14k4EJ + rm /tmp/tmp.pLRox3qrDQ /tmp/tmp.2nHl14k4EJ + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running upgrade-sharded-rs0 3 false + local name=upgrade-sharded-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-rs0-0 + local pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pod/upgrade-sharded-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-rs0-1 + local pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pod/upgrade-sharded-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hcKppxnIRP +++ mktemp ++ local LAST_ERR=/tmp/tmp.gR2MTvyO4z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hcKppxnIRP ++ cat /tmp/tmp.gR2MTvyO4z ++ rm /tmp/tmp.hcKppxnIRP /tmp/tmp.gR2MTvyO4z ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-sharded-rs0-2 + local pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pod/upgrade-sharded-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NrZ2jMExj8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RWGeTxFVgm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NrZ2jMExj8 ++ cat /tmp/tmp.RWGeTxFVgm ++ rm /tmp/tmp.NrZ2jMExj8 /tmp/tmp.RWGeTxFVgm ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qpcFPfSgM7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.p8aTPEGhIy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qpcFPfSgM7 ++ cat /tmp/tmp.p8aTPEGhIy ++ rm /tmp/tmp.qpcFPfSgM7 /tmp/tmp.p8aTPEGhIy ++ return 0 + [[ false == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running upgrade-sharded-cfg 3 false + local name=upgrade-sharded-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local 
cluster_name=upgrade-sharded ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-cfg-0 + local pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pod/upgrade-sharded-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-cfg-1 + local pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pod/upgrade-sharded-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zfJDHu7uof +++ mktemp ++ local LAST_ERR=/tmp/tmp.VbzYDJyLnz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zfJDHu7uof ++ cat /tmp/tmp.VbzYDJyLnz ++ rm /tmp/tmp.zfJDHu7uof /tmp/tmp.VbzYDJyLnz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod upgrade-sharded-cfg-2 + local pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pod/upgrade-sharded-cfg-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ye8ZW31J4u +++ mktemp ++ local LAST_ERR=/tmp/tmp.HWj0prx1QX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ye8ZW31J4u ++ cat /tmp/tmp.HWj0prx1QX ++ rm /tmp/tmp.Ye8ZW31J4u /tmp/tmp.HWj0prx1QX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uuGBweMvlp +++ mktemp ++ local LAST_ERR=/tmp/tmp.6mqwkcA6TI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uuGBweMvlp ++ cat /tmp/tmp.6mqwkcA6TI ++ rm /tmp/tmp.uuGBweMvlp /tmp/tmp.6mqwkcA6TI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RISucEkkqa +++ mktemp ++ local LAST_ERR=/tmp/tmp.kgfDpctrhP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RISucEkkqa ++ cat /tmp/tmp.kgfDpctrhP ++ rm /tmp/tmp.RISucEkkqa /tmp/tmp.kgfDpctrhP ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jKLexxj3cO +++ mktemp ++ local LAST_ERR=/tmp/tmp.qkaZZTslCo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jKLexxj3cO ++ cat /tmp/tmp.qkaZZTslCo ++ rm 
/tmp/tmp.jKLexxj3cO /tmp/tmp.qkaZZTslCo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wFN3E1xwtQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CePQDXljCL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wFN3E1xwtQ ++ cat /tmp/tmp.CePQDXljCL ++ rm /tmp/tmp.wFN3E1xwtQ /tmp/tmp.CePQDXljCL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q5V8RWxjqH +++ mktemp ++ local LAST_ERR=/tmp/tmp.ahbJ1HsgRW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q5V8RWxjqH ++ cat /tmp/tmp.ahbJ1HsgRW ++ rm /tmp/tmp.Q5V8RWxjqH /tmp/tmp.ahbJ1HsgRW ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MUe3SUsial +++ mktemp ++ local LAST_ERR=/tmp/tmp.t7lQ5Rjf9K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MUe3SUsial ++ cat /tmp/tmp.t7lQ5Rjf9K ++ rm /tmp/tmp.MUe3SUsial /tmp/tmp.t7lQ5Rjf9K ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SntVLIM9LZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.U4h0Ekm0cO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SntVLIM9LZ ++ cat /tmp/tmp.U4h0Ekm0cO ++ rm /tmp/tmp.SntVLIM9LZ /tmp/tmp.U4h0Ekm0cO ++ return 0 + [[ 3 == \3 ]] + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-715 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yLtFbkygIf +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZQ7Y9OmQ2R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) 
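# Annotation: the wait_cluster_consistency trace above reduces to a bounded poll of the
# PSMDB custom resource: re-read .status.state every 20 seconds and give up after 32
# attempts. A minimal standalone sketch of that loop (illustrative, not the test helper
# itself; cluster name as used in this run):
retry=0
until [[ $(kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}') == "ready" ]]; do
  (( ++retry >= 32 )) && { echo "cluster never became ready" >&2; exit 1; }
  echo 'waiting for cluster readiness'
  sleep 20
done
# Once the state is "ready", the helper also verifies both replica sets report all members ready:
kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}'   # expects 3
kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}'   # expects 3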
++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yLtFbkygIf ++ cat /tmp/tmp.ZQ7Y9OmQ2R ++ rm /tmp/tmp.yLtFbkygIf /tmp/tmp.ZQ7Y9OmQ2R ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.TJmVHOjgSo ++ mktemp + local LAST_ERR=/tmp/tmp.bZ2rcRYk8f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TJmVHOjgSo Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("078c7b53-7305-4671-b1a4-27746f604206") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.bZ2rcRYk8f + rm /tmp/tmp.TJmVHOjgSo /tmp/tmp.bZ2rcRYk8f + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FrqwnQIKbe +++ mktemp ++ local LAST_ERR=/tmp/tmp.ekCPjh4ct8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FrqwnQIKbe ++ cat /tmp/tmp.ekCPjh4ct8 ++ rm /tmp/tmp.FrqwnQIKbe /tmp/tmp.ekCPjh4ct8 ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.2sBnNc5tWm ++ mktemp + local LAST_ERR=/tmp/tmp.r03G9KVJz7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 
100500 })\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2sBnNc5tWm Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("36dededb-6cd0-4a11-a2bc-bb9ba2876c71") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.r03G9KVJz7 + rm /tmp/tmp.2sBnNc5tWm /tmp/tmp.r03G9KVJz7 + return 0 + compare_generation 1 statefulset upgrade-sharded-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-sharded-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VWKzO25AEg +++ mktemp ++ local LAST_ERR=/tmp/tmp.F3uTNEQ1RO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VWKzO25AEg ++ cat /tmp/tmp.F3uTNEQ1RO ++ rm /tmp/tmp.VWKzO25AEg /tmp/tmp.F3uTNEQ1RO ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-cfg + local generation=1 + local resource=statefulset + local name=upgrade-sharded-cfg + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AVlo2vjX0M +++ mktemp ++ local LAST_ERR=/tmp/tmp.O5AbO5e66I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AVlo2vjX0M ++ cat /tmp/tmp.O5AbO5e66I ++ rm /tmp/tmp.AVlo2vjX0M /tmp/tmp.O5AbO5e66I ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-mongos + local generation=1 + local resource=statefulset + local name=upgrade-sharded-mongos + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l1u5uvuSad +++ mktemp ++ local LAST_ERR=/tmp/tmp.dRNAiotGLk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l1u5uvuSad ++ cat /tmp/tmp.dRNAiotGLk ++ rm /tmp/tmp.l1u5uvuSad /tmp/tmp.dRNAiotGLk ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade-sharded + local generation=1 + local resource=psmdb + local name=upgrade-sharded + local current_generation ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gvUp9KN86I +++ mktemp ++ local LAST_ERR=/tmp/tmp.DCAi1582th ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gvUp9KN86I ++ cat /tmp/tmp.DCAi1582th ++ 
rm /tmp/tmp.gvUp9KN86I /tmp/tmp.DCAi1582th ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + backup_name_minio=backup-minio + desc 'create backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- create backup backup-minio ----------------------------------------------------------------------------------- + wait_backup_agent upgrade-sharded-rs0-0 + local agent_pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in upgrade-sharded-rs0-0...2025-12-17T15:45:35.000+0000 I listening for the commands + wait_backup_agent upgrade-sharded-rs0-1 + local agent_pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in upgrade-sharded-rs0-1...2025-12-17T15:45:37.000+0000 I listening for the commands + wait_backup_agent upgrade-sharded-rs0-2 + local agent_pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in upgrade-sharded-rs0-2...2025-12-17T15:45:36.000+0000 I listening for the commands + wait_backup_agent upgrade-sharded-cfg-0 + local agent_pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pbm-agent to be ready in upgrade-sharded-cfg-0...2025-12-17T15:44:28.000+0000 I listening for the commands + wait_backup_agent upgrade-sharded-cfg-1 + local agent_pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pbm-agent to be ready in upgrade-sharded-cfg-1...2025-12-17T15:44:56.000+0000 I listening for the commands + wait_backup_agent upgrade-sharded-cfg-2 + local agent_pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pbm-agent to be ready in upgrade-sharded-cfg-2...2025-12-17T15:44:59.000+0000 I listening for the commands + run_backup minio + local storage=minio + local backup_name=backup-minio + local type=logical + desc 'run backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/upgrade-sharded/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.i7v2KUiZMC ++ mktemp + local LAST_ERR=/tmp/tmp.dOJn9ejUjf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i7v2KUiZMC perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.dOJn9ejUjf + rm /tmp/tmp.i7v2KUiZMC /tmp/tmp.dOJn9ejUjf + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state................OK + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.bSFqD8ekwg ++ mktemp + local LAST_ERR=/tmp/tmp.4Ok8RxkHmI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.bSFqD8ekwg customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.4Ok8RxkHmI + rm /tmp/tmp.bSFqD8ekwg /tmp/tmp.4Ok8RxkHmI + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/cw-rbac.yaml -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.VHP802y5Rh ++ mktemp + local LAST_ERR=/tmp/tmp.UFJJEIoL2f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/cw-rbac.yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VHP802y5Rh clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator unchanged clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.UFJJEIoL2f + rm /tmp/tmp.VHP802y5Rh /tmp/tmp.UFJJEIoL2f + return 0 + desc 'use new image perconalab/percona-server-mongodb-operator:PR-2151-135050b2' + set +o xtrace ----------------------------------------------------------------------------------- use new image perconalab/percona-server-mongodb-operator:PR-2151-135050b2 ----------------------------------------------------------------------------------- + kubectl_bin patch deployment -n psmdb-operator percona-server-mongodb-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-2151-135050b2"}]}}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.Twf4NoTuiJ ++ mktemp + local LAST_ERR=/tmp/tmp.nIa1kbLUlr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch deployment -n psmdb-operator percona-server-mongodb-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-2151-135050b2"}]}}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Twf4NoTuiJ deployment.apps/percona-server-mongodb-operator patched + cat /tmp/tmp.nIa1kbLUlr + rm /tmp/tmp.Twf4NoTuiJ /tmp/tmp.nIa1kbLUlr + return 0 + kubectl_bin rollout status deployment/percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.1xuHgZWkC0 ++ mktemp + local LAST_ERR=/tmp/tmp.1Zjm19wmTt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl rollout status deployment/percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1xuHgZWkC0 Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... 
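# Annotation: stripped of the retry wrapper, the operator upgrade performed above is four
# kubectl commands, taken directly from this trace (manifest paths are relative to the
# operator repo checkout; the image tag is the PR build under test):
kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
kubectl apply -f deploy/cw-rbac.yaml -n psmdb-operator
kubectl -n psmdb-operator patch deployment percona-server-mongodb-operator \
  -p '{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-2151-135050b2"}]}}}}'
kubectl -n psmdb-operator rollout status deployment/percona-server-mongodb-operator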
deployment "percona-server-mongodb-operator" successfully rolled out + cat /tmp/tmp.1Zjm19wmTt + rm /tmp/tmp.1xuHgZWkC0 /tmp/tmp.1Zjm19wmTt + return 0 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- ++ kubectl_bin get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.btOP8bw432 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JryG9kIpfr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.btOP8bw432 ++ cat /tmp/tmp.JryG9kIpfr ++ rm /tmp/tmp.btOP8bw432 /tmp/tmp.JryG9kIpfr ++ return 0 + [[ 1 -eq 1 ]] + sleep 10 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-sharded-rs0 3 false + local name=upgrade-sharded-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-rs0-0 + local pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pod/upgrade-sharded-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-rs0-1 + local pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pod/upgrade-sharded-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wZkqGh8iAY +++ mktemp ++ local LAST_ERR=/tmp/tmp.fR9vxFOzwR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wZkqGh8iAY ++ cat /tmp/tmp.fR9vxFOzwR ++ rm /tmp/tmp.wZkqGh8iAY /tmp/tmp.fR9vxFOzwR ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-sharded-rs0-2 + local pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pod/upgrade-sharded-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LfwDfCIRcW +++ mktemp ++ local LAST_ERR=/tmp/tmp.xO182E1nMo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LfwDfCIRcW ++ cat /tmp/tmp.xO182E1nMo ++ rm /tmp/tmp.LfwDfCIRcW /tmp/tmp.xO182E1nMo ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.UVP2mxWsZ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8eh0CB5mom ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UVP2mxWsZ1 ++ cat /tmp/tmp.8eh0CB5mom ++ rm /tmp/tmp.UVP2mxWsZ1 /tmp/tmp.8eh0CB5mom ++ return 0 + [[ false == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running upgrade-sharded-cfg 3 false + local name=upgrade-sharded-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-cfg-0 + local pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pod/upgrade-sharded-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-cfg-1 + local pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pod/upgrade-sharded-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dBl2RktTgA +++ mktemp ++ local LAST_ERR=/tmp/tmp.L8ppoTCATR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dBl2RktTgA ++ cat /tmp/tmp.L8ppoTCATR ++ rm /tmp/tmp.dBl2RktTgA /tmp/tmp.L8ppoTCATR ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod upgrade-sharded-cfg-2 + local pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pod/upgrade-sharded-cfg-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9rB45orE5a +++ mktemp ++ local LAST_ERR=/tmp/tmp.TD4r0Y3Q6c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9rB45orE5a ++ cat /tmp/tmp.TD4r0Y3Q6c ++ rm /tmp/tmp.9rB45orE5a /tmp/tmp.TD4r0Y3Q6c ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lgyFKdvtYw +++ mktemp ++ local LAST_ERR=/tmp/tmp.cSzoJKNl5e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lgyFKdvtYw ++ cat /tmp/tmp.cSzoJKNl5e ++ rm /tmp/tmp.lgyFKdvtYw /tmp/tmp.cSzoJKNl5e ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ltq4qwDS4j +++ mktemp ++ local LAST_ERR=/tmp/tmp.YpeT9AQuGO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ 
break ++ cat /tmp/tmp.ltq4qwDS4j ++ cat /tmp/tmp.YpeT9AQuGO ++ rm /tmp/tmp.ltq4qwDS4j /tmp/tmp.YpeT9AQuGO ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qY7ecxNPir +++ mktemp ++ local LAST_ERR=/tmp/tmp.g5NULQwDqi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qY7ecxNPir ++ cat /tmp/tmp.g5NULQwDqi ++ rm /tmp/tmp.qY7ecxNPir /tmp/tmp.g5NULQwDqi ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VMjqQRPt8L +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z5LuCOrPxB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VMjqQRPt8L ++ cat /tmp/tmp.Z5LuCOrPxB ++ rm /tmp/tmp.VMjqQRPt8L /tmp/tmp.Z5LuCOrPxB ++ return 0 + [[ 3 == \3 ]] + check_applied_images operator + local updated_image=operator + case "${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p5Mk6e1hit +++ mktemp ++ local LAST_ERR=/tmp/tmp.DogsIha7PI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p5Mk6e1hit ++ cat /tmp/tmp.DogsIha7PI ++ rm /tmp/tmp.p5Mk6e1hit /tmp/tmp.DogsIha7PI ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-2151-135050b2 == perconalab/percona-server-mongodb-operator:PR-2151-135050b2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yg7Jx9alv8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.75GII1r0Wf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yg7Jx9alv8 ++ cat /tmp/tmp.75GII1r0Wf ++ rm /tmp/tmp.yg7Jx9alv8 /tmp/tmp.75GII1r0Wf ++ return 0 + [[ percona/percona-backup-mongodb:2.11.0 == percona/percona-backup-mongodb:2.11.0 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EBXqO0VBBU +++ mktemp ++ local LAST_ERR=/tmp/tmp.GOZxxhinTM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EBXqO0VBBU ++ cat /tmp/tmp.GOZxxhinTM ++ rm /tmp/tmp.EBXqO0VBBU /tmp/tmp.GOZxxhinTM ++ return 0 + [[ percona/pmm-client:2.44.1-1 percona/pmm-client:3.4.1 == percona/pmm-client:2.44.1-1 percona/pmm-client:3.4.1 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ihOPK3de5Q +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.gnY8Nm2iGf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ihOPK3de5Q ++ cat /tmp/tmp.gnY8Nm2iGf ++ rm /tmp/tmp.ihOPK3de5Q /tmp/tmp.gnY8Nm2iGf ++ return 0 + [[ percona/percona-server-mongodb:8.0.12-4 == percona/percona-server-mongodb:8.0.12-4 ]] + : Operator image has been updated correctly + compare_generation 1 statefulset upgrade-sharded-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-sharded-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jrQKfLtU3K +++ mktemp ++ local LAST_ERR=/tmp/tmp.PhKMgHR02V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jrQKfLtU3K ++ cat /tmp/tmp.PhKMgHR02V ++ rm /tmp/tmp.jrQKfLtU3K /tmp/tmp.PhKMgHR02V ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-cfg + local generation=1 + local resource=statefulset + local name=upgrade-sharded-cfg + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IpgMJ1d9lk +++ mktemp ++ local LAST_ERR=/tmp/tmp.95N9uicGme ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IpgMJ1d9lk ++ cat /tmp/tmp.95N9uicGme ++ rm /tmp/tmp.IpgMJ1d9lk /tmp/tmp.95N9uicGme ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset upgrade-sharded-mongos + local generation=1 + local resource=statefulset + local name=upgrade-sharded-mongos + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nVPRPUI13L +++ mktemp ++ local LAST_ERR=/tmp/tmp.CZPC7JdMEN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nVPRPUI13L ++ cat /tmp/tmp.CZPC7JdMEN ++ rm /tmp/tmp.nVPRPUI13L /tmp/tmp.CZPC7JdMEN ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade-sharded + local generation=1 + local resource=psmdb + local name=upgrade-sharded + local current_generation ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dge1IxHt3q +++ mktemp ++ local LAST_ERR=/tmp/tmp.S8MOc7kweZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dge1IxHt3q ++ cat /tmp/tmp.S8MOc7kweZ ++ rm /tmp/tmp.dge1IxHt3q /tmp/tmp.S8MOc7kweZ ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch psmdb images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- 
patch psmdb images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb upgrade-sharded --type=merge --patch '{ "spec": { "crVersion": "1.22.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod8.0", "pmm": { "image": "percona/pmm-client:2.44.1-1" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.EyVvzY71S0 ++ mktemp + local LAST_ERR=/tmp/tmp.mpFAmrJ1Up + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb upgrade-sharded --type=merge --patch '{ "spec": { "crVersion": "1.22.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod8.0", "pmm": { "image": "percona/pmm-client:2.44.1-1" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EyVvzY71S0 perconaservermongodb.psmdb.percona.com/upgrade-sharded patched + cat /tmp/tmp.mpFAmrJ1Up + rm /tmp/tmp.EyVvzY71S0 /tmp/tmp.mpFAmrJ1Up + return 0 + sleep 10 + desc 'check cluster after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check cluster after full upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-sharded-rs0 3 false + local name=upgrade-sharded-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-rs0-0 + local pod=upgrade-sharded-rs0-0 + set +o xtrace waiting for pod/upgrade-sharded-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-rs0-1 + local pod=upgrade-sharded-rs0-1 + set +o xtrace waiting for pod/upgrade-sharded-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ks06LiiS2P +++ mktemp ++ local LAST_ERR=/tmp/tmp.rhqCTYB58v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ks06LiiS2P ++ cat /tmp/tmp.rhqCTYB58v ++ rm /tmp/tmp.Ks06LiiS2P /tmp/tmp.rhqCTYB58v ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-sharded-rs0-2 + local pod=upgrade-sharded-rs0-2 + set +o xtrace waiting for pod/upgrade-sharded-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YSXnsxxgA3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.W3RUV03tzl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YSXnsxxgA3 ++ cat /tmp/tmp.W3RUV03tzl ++ rm /tmp/tmp.YSXnsxxgA3 /tmp/tmp.W3RUV03tzl ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8w82VwnG0n +++ mktemp ++ 
local LAST_ERR=/tmp/tmp.B8s6dSqgG0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8w82VwnG0n ++ cat /tmp/tmp.B8s6dSqgG0 ++ rm /tmp/tmp.8w82VwnG0n /tmp/tmp.B8s6dSqgG0 ++ return 0 + [[ false == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running upgrade-sharded-cfg 3 false + local name=upgrade-sharded-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=upgrade-sharded ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-sharded-cfg-0 + local pod=upgrade-sharded-cfg-0 + set +o xtrace waiting for pod/upgrade-sharded-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-sharded-cfg-1 + local pod=upgrade-sharded-cfg-1 + set +o xtrace waiting for pod/upgrade-sharded-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Sqr7x3vhMc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ITyGfn1gQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Sqr7x3vhMc ++ cat /tmp/tmp.ITyGfn1gQL ++ rm /tmp/tmp.Sqr7x3vhMc /tmp/tmp.ITyGfn1gQL ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod upgrade-sharded-cfg-2 + local pod=upgrade-sharded-cfg-2 + set +o xtrace waiting for pod/upgrade-sharded-cfg-2 to be ready.OK ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AFDfN9wK8A +++ mktemp ++ local LAST_ERR=/tmp/tmp.w7DEb5OEPW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AFDfN9wK8A ++ cat /tmp/tmp.w7DEb5OEPW ++ rm /tmp/tmp.AFDfN9wK8A /tmp/tmp.w7DEb5OEPW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hlkng7EeRU +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cajkl7wQga ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hlkng7EeRU ++ cat /tmp/tmp.Cajkl7wQga ++ rm /tmp/tmp.hlkng7EeRU /tmp/tmp.Cajkl7wQga ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h8uycwD7m0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wZYwLeIEwH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h8uycwD7m0 ++ cat 
/tmp/tmp.wZYwLeIEwH ++ rm /tmp/tmp.h8uycwD7m0 /tmp/tmp.wZYwLeIEwH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DO3XqLOUaO +++ mktemp ++ local LAST_ERR=/tmp/tmp.3n6bMfKlk7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DO3XqLOUaO ++ cat /tmp/tmp.3n6bMfKlk7 ++ rm /tmp/tmp.DO3XqLOUaO /tmp/tmp.3n6bMfKlk7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uCKtZhDnOU +++ mktemp ++ local LAST_ERR=/tmp/tmp.bsrUrGL9jb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uCKtZhDnOU ++ cat /tmp/tmp.bsrUrGL9jb ++ rm /tmp/tmp.uCKtZhDnOU /tmp/tmp.bsrUrGL9jb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XY58nybuW8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9wbLUwGpum ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XY58nybuW8 ++ cat /tmp/tmp.9wbLUwGpum ++ rm /tmp/tmp.XY58nybuW8 /tmp/tmp.9wbLUwGpum ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S2O1rYktOQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.R2dqIvIsxK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S2O1rYktOQ ++ cat /tmp/tmp.R2dqIvIsxK ++ rm /tmp/tmp.S2O1rYktOQ /tmp/tmp.R2dqIvIsxK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9AduvYDEiu +++ mktemp ++ local LAST_ERR=/tmp/tmp.LEoJhNiQeg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9AduvYDEiu ++ cat /tmp/tmp.LEoJhNiQeg ++ rm /tmp/tmp.9AduvYDEiu /tmp/tmp.LEoJhNiQeg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ 
local LAST_OUT=/tmp/tmp.ywIKktQuWW +++ mktemp ++ local LAST_ERR=/tmp/tmp.P2T02Q9tHo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ywIKktQuWW ++ cat /tmp/tmp.P2T02Q9tHo ++ rm /tmp/tmp.ywIKktQuWW /tmp/tmp.P2T02Q9tHo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F9fbjSfTyO +++ mktemp ++ local LAST_ERR=/tmp/tmp.mEchOHfVTk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F9fbjSfTyO ++ cat /tmp/tmp.mEchOHfVTk ++ rm /tmp/tmp.F9fbjSfTyO /tmp/tmp.mEchOHfVTk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oXjd2TfpmV +++ mktemp ++ local LAST_ERR=/tmp/tmp.E8ppyellCW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oXjd2TfpmV ++ cat /tmp/tmp.E8ppyellCW ++ rm /tmp/tmp.oXjd2TfpmV /tmp/tmp.E8ppyellCW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bQrGdm4lxQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CAo2ce0dKx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bQrGdm4lxQ ++ cat /tmp/tmp.CAo2ce0dKx ++ rm /tmp/tmp.bQrGdm4lxQ /tmp/tmp.CAo2ce0dKx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cfu18JPisV +++ mktemp ++ local LAST_ERR=/tmp/tmp.hYc6re2cNW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cfu18JPisV ++ cat /tmp/tmp.hYc6re2cNW ++ rm /tmp/tmp.Cfu18JPisV /tmp/tmp.hYc6re2cNW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CfAAPjRJ9p +++ mktemp ++ local LAST_ERR=/tmp/tmp.aOHBS9H5EN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.CfAAPjRJ9p ++ cat /tmp/tmp.aOHBS9H5EN ++ rm /tmp/tmp.CfAAPjRJ9p /tmp/tmp.aOHBS9H5EN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iw9A9Xw8Wz +++ mktemp ++ local LAST_ERR=/tmp/tmp.0JkcbGzhXp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iw9A9Xw8Wz ++ cat /tmp/tmp.0JkcbGzhXp ++ rm /tmp/tmp.iw9A9Xw8Wz /tmp/tmp.0JkcbGzhXp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b936O7Zx8d +++ mktemp ++ local LAST_ERR=/tmp/tmp.MNmiKxmpqx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b936O7Zx8d ++ cat /tmp/tmp.MNmiKxmpqx ++ rm /tmp/tmp.b936O7Zx8d /tmp/tmp.MNmiKxmpqx ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RPE9VyZREy +++ mktemp ++ local LAST_ERR=/tmp/tmp.aTOU31qQBL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RPE9VyZREy ++ cat /tmp/tmp.aTOU31qQBL ++ rm /tmp/tmp.RPE9VyZREy /tmp/tmp.aTOU31qQBL ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rz852OyQ59 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7TwIDrritd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rz852OyQ59 ++ cat /tmp/tmp.7TwIDrritd ++ rm /tmp/tmp.rz852OyQ59 /tmp/tmp.7TwIDrritd ++ return 0 + [[ 3 == \3 ]] + simple_data_check upgrade-sharded 3 1 -mongos + local cluster_name=upgrade-sharded + let last_pod=3-1 + local isSharded=1 + local cluster_pfx=-mongos + '[' 1 -eq 1 ']' + sleep 10 + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v8Y9EWFrSp +++ mktemp ++ local LAST_ERR=/tmp/tmp.mUP3C5MTsk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v8Y9EWFrSp ++ cat /tmp/tmp.mUP3C5MTsk ++ rm /tmp/tmp.v8Y9EWFrSp /tmp/tmp.mUP3C5MTsk ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hfF3yA1kMf +++ mktemp ++ local LAST_ERR=/tmp/tmp.06GAz9iW7x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ 
kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hfF3yA1kMf ++ cat /tmp/tmp.06GAz9iW7x ++ rm /tmp/tmp.hfF3yA1kMf /tmp/tmp.06GAz9iW7x ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BUGroKwKZx +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZKnMlCJ2kC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BUGroKwKZx ++ cat /tmp/tmp.ZKnMlCJ2kC ++ rm /tmp/tmp.BUGroKwKZx /tmp/tmp.ZKnMlCJ2kC ++ return 0 + [[ 3 == \3 ]] + compare_mongos_cmd find myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local command=find + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mlWJ9XjuuF +++ mktemp ++ local LAST_ERR=/tmp/tmp.rQD7tquQGm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mlWJ9XjuuF ++ cat /tmp/tmp.rQD7tquQGm ++ rm /tmp/tmp.mlWJ9XjuuF /tmp/tmp.rQD7tquQGm ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.9ew5FPHdOI ++ mktemp + local LAST_ERR=/tmp/tmp.hBwMAzjHZ2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9ew5FPHdOI + cat /tmp/tmp.hBwMAzjHZ2 + rm /tmp/tmp.9ew5FPHdOI /tmp/tmp.hBwMAzjHZ2 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/upgrade-sharded/compare/find.json /tmp/tmp.HyXxiG9f2c/find + check_applied_images all + local updated_image=all + case 
"${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tiNnulxxNN +++ mktemp ++ local LAST_ERR=/tmp/tmp.tX9l1a28O7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tiNnulxxNN ++ cat /tmp/tmp.tX9l1a28O7 ++ rm /tmp/tmp.tiNnulxxNN /tmp/tmp.tX9l1a28O7 ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-2151-135050b2 == perconalab/percona-server-mongodb-operator:PR-2151-135050b2 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.geNLv4iQ0g +++ mktemp ++ local LAST_ERR=/tmp/tmp.eeru4vLCOy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.geNLv4iQ0g ++ cat /tmp/tmp.eeru4vLCOy ++ rm /tmp/tmp.geNLv4iQ0g /tmp/tmp.eeru4vLCOy ++ return 0 + [[ perconalab/percona-server-mongodb-operator:main-backup == perconalab/percona-server-mongodb-operator:main-backup ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fs6upkAQkp +++ mktemp ++ local LAST_ERR=/tmp/tmp.cMbQYU9Jl1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fs6upkAQkp ++ cat /tmp/tmp.cMbQYU9Jl1 ++ rm /tmp/tmp.Fs6upkAQkp /tmp/tmp.cMbQYU9Jl1 ++ return 0 + [[ percona/pmm-client:2.44.1-1 == percona/pmm-client:2.44.1-1 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YGDhIkzoi2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kED2sSwE5Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YGDhIkzoi2 ++ cat /tmp/tmp.kED2sSwE5Z ++ rm /tmp/tmp.YGDhIkzoi2 /tmp/tmp.kED2sSwE5Z ++ return 0 + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 == perconalab/percona-server-mongodb-operator:main-mongod8.0 ]] + : Cluster images have been updated correctly + compare_generation 2 statefulset upgrade-sharded-rs0 + local generation=2 + local resource=statefulset + local name=upgrade-sharded-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ElnkFTV7aP +++ mktemp ++ local LAST_ERR=/tmp/tmp.5u5M9UJfe7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ElnkFTV7aP ++ cat /tmp/tmp.5u5M9UJfe7 ++ rm /tmp/tmp.ElnkFTV7aP /tmp/tmp.5u5M9UJfe7 ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset upgrade-sharded-cfg + local 
generation=2 + local resource=statefulset + local name=upgrade-sharded-cfg + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A0miF6Q7zA +++ mktemp ++ local LAST_ERR=/tmp/tmp.eQOHltaXly ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A0miF6Q7zA ++ cat /tmp/tmp.eQOHltaXly ++ rm /tmp/tmp.A0miF6Q7zA /tmp/tmp.eQOHltaXly ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset upgrade-sharded-mongos + local generation=2 + local resource=statefulset + local name=upgrade-sharded-mongos + local current_generation ++ kubectl_bin get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wueGs5EcWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.AZ4pYk1Hfq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-sharded-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wueGs5EcWa ++ cat /tmp/tmp.AZ4pYk1Hfq ++ rm /tmp/tmp.wueGs5EcWa /tmp/tmp.AZ4pYk1Hfq ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 psmdb upgrade-sharded + local generation=2 + local resource=psmdb + local name=upgrade-sharded + local current_generation ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5sBlvlLCvr +++ mktemp ++ local LAST_ERR=/tmp/tmp.I3zNGwLhqr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5sBlvlLCvr ++ cat /tmp/tmp.I3zNGwLhqr ++ rm /tmp/tmp.5sBlvlLCvr /tmp/tmp.I3zNGwLhqr ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + desc 'check if upgrade order is cfg, rs0, mongos' + set +o xtrace ----------------------------------------------------------------------------------- check if upgrade order is cfg, rs0, mongos ----------------------------------------------------------------------------------- + sleep 60 + check_upgrade_order cfg 3 1 + local pod_type=cfg + local cluster_size=3 + local upgrade_order=1 + local start=1 + local end=3 ++ grep -vE '^NAME|client|operator|minio-service' ++ kubectl_bin get pod --sort-by=.status.startTime ++ sed -n 1,3p ++ grep -c '\-cfg\-' +++ mktemp grep: warning: stray \ before - ++ local LAST_OUT=/tmp/tmp.30hnwMsjEk +++ mktemp ++ local LAST_ERR=/tmp/tmp.sgCH8cgwd6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod --sort-by=.status.startTime ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.30hnwMsjEk ++ cat /tmp/tmp.sgCH8cgwd6 ++ rm /tmp/tmp.30hnwMsjEk /tmp/tmp.sgCH8cgwd6 ++ return 0 + local nr=3 + [[ 3 -ne 3 ]] + echo 'cfg was upgraded 1!' cfg was upgraded 1! 
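# Annotation: the repeated "grep: warning: stray \ before -" messages above are harmless.
# The check_upgrade_order helper escapes '-' in patterns such as '\-cfg\-', and newer GNU
# grep (3.8+) warns that the backslash is unnecessary. A warning-free equivalent of the
# cfg-order check (a sketch only; the helper itself is unchanged in this run) passes the
# pattern with -e so the leading '-' is not parsed as an option:
kubectl get pod --sort-by=.status.startTime \
  | grep -vE '^NAME|client|operator|minio-service' \
  | sed -n 1,3p \
  | grep -c -e '-cfg-'   # expects 3: the three cfg pods must restart first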
+ check_upgrade_order rs0 3 2 + local pod_type=rs0 + local cluster_size=3 + local upgrade_order=2 + local start=4 + local end=6 ++ kubectl_bin get pod --sort-by=.status.startTime ++ grep -vE '^NAME|client|operator|minio-service' ++ sed -n 4,6p ++ grep -c '\-rs0\-' +++ mktemp grep: warning: stray \ before - ++ local LAST_OUT=/tmp/tmp.czkDLb6bzu +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZjqnjwJFMU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod --sort-by=.status.startTime ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.czkDLb6bzu ++ cat /tmp/tmp.ZjqnjwJFMU ++ rm /tmp/tmp.czkDLb6bzu /tmp/tmp.ZjqnjwJFMU ++ return 0 + local nr=3 + [[ 3 -ne 3 ]] + echo 'rs0 was upgraded 2!' rs0 was upgraded 2! + check_upgrade_order mongos 3 3 + local pod_type=mongos + local cluster_size=3 + local upgrade_order=3 + local start=7 + local end=9 ++ kubectl_bin get pod --sort-by=.status.startTime ++ grep -vE '^NAME|client|operator|minio-service' ++ sed -n 7,9p ++ grep -c '\-mongos\-' +++ mktemp grep: warning: stray \ before - ++ local LAST_OUT=/tmp/tmp.e4ZO1jqoWt +++ mktemp ++ local LAST_ERR=/tmp/tmp.gnIHPljM1v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod --sort-by=.status.startTime ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e4ZO1jqoWt ++ cat /tmp/tmp.gnIHPljM1v ++ rm /tmp/tmp.e4ZO1jqoWt /tmp/tmp.gnIHPljM1v ++ return 0 + local nr=3 + [[ 3 -ne 3 ]] + echo 'mongos was upgraded 3!' mongos was upgraded 3! + desc 'drop collection and do restore with new version' + set +o xtrace ----------------------------------------------------------------------------------- drop collection and do restore with new version ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b3HY6LN3zf +++ mktemp ++ local LAST_ERR=/tmp/tmp.W3lKEou0Hk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b3HY6LN3zf ++ cat /tmp/tmp.W3lKEou0Hk ++ rm /tmp/tmp.b3HY6LN3zf /tmp/tmp.W3lKEou0Hk ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.8F9u9okLKp ++ mktemp + local LAST_ERR=/tmp/tmp.OCmgIBPa4Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 
']' + break + cat /tmp/tmp.8F9u9okLKp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("63c5a121-a590-4c6a-82bd-cd8ded0d6880") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.OCmgIBPa4Q + rm /tmp/tmp.8F9u9okLKp /tmp/tmp.OCmgIBPa4Q + return 0 + check_backup_in_storage backup-minio minio rs0 myApp.test.gz + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=myApp.test.gz + local endpoint ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.7DxAXUkgCy +++ mktemp ++ local LAST_ERR=/tmp/tmp.z0JU5iKypU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7DxAXUkgCy ++ cat /tmp/tmp.z0JU5iKypU ++ rm /tmp/tmp.7DxAXUkgCy /tmp/tmp.z0JU5iKypU ++ return 0 + backup_dest=operator-testing/2025-12-17T15:46:08Z + case ${storage_type} in + endpoint=minio-service + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-17T15:46:08Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.EjI0mx7jh0 ++ mktemp + local LAST_ERR=/tmp/tmp.r4QXIxtnZw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-17T15:46:08Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EjI0mx7jh0 + cat /tmp/tmp.r4QXIxtnZw All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
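
Every kubectl call in this log, including the aws-cli probe above, goes through the harness's retry helper, which is why each step expands into the same mktemp/LAST_OUT/LAST_ERR/seq 0 2 boilerplate. A sketch of that wrapper as far as it can be reconstructed from the trace (the back-off on failure is an assumption; every call in this run succeeds on the first attempt, so the retry branch is never exercised here):

    # Run kubectl with stdout/stderr captured to temp files, retrying up to
    # three times; afterwards replay both streams and clean up.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            set -e
            if [ "${exit_status}" -eq 0 ]; then
                break
            fi
            sleep "${timeout}"   # assumed back-off; not visible in this trace
        done
        cat "${LAST_OUT}"
        cat "${LAST_ERR}"
        rm "${LAST_OUT}" "${LAST_ERR}"
        return "${exit_status}"
    }

The surrounding check below additionally retries the whole S3 listing up to 60 times with a one-second sleep, presumably because the backup object can appear in MinIO slightly after the backup reports "ready".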
+ rm /tmp/tmp.EjI0mx7jh0 /tmp/tmp.r4QXIxtnZw + return 0 + sleep 1 + let retry+=1 + '[' 1 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-17T15:46:08Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.IlC4NIvxbT ++ mktemp + local LAST_ERR=/tmp/tmp.28nSaSoSvT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-12-17T15:46:08Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IlC4NIvxbT + cat /tmp/tmp.28nSaSoSvT All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_upgrade-sharded-715 + rm /tmp/tmp.IlC4NIvxbT /tmp/tmp.28nSaSoSvT + return 0 2025-12-17 15:46:12 55 myApp.test.gz + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5unl7EnLgI +++ mktemp ++ local LAST_ERR=/tmp/tmp.86NdsSedfY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5unl7EnLgI ++ cat /tmp/tmp.86NdsSedfY ++ rm /tmp/tmp.5unl7EnLgI /tmp/tmp.86NdsSedfY ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.zHELZgPCDw ++ mktemp + local LAST_ERR=/tmp/tmp.rckOGyB2V3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zHELZgPCDw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : 
UUID("658b0d0e-4440-4e78-9f3d-412a57c1bb09") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.rckOGyB2V3 + rm /tmp/tmp.zHELZgPCDw /tmp/tmp.rckOGyB2V3 + return 0 + compare_mongos_cmd find myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 mongodb .svc.cluster.local '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xS5szeZiXL +++ mktemp ++ local LAST_ERR=/tmp/tmp.w5B0NdWDJW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xS5szeZiXL ++ cat /tmp/tmp.w5B0NdWDJW ++ rm /tmp/tmp.xS5szeZiXL /tmp/tmp.w5B0NdWDJW ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.vAKD3B98Iw ++ mktemp + local LAST_ERR=/tmp/tmp.Bfy5YgoJCI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vAKD3B98Iw + cat /tmp/tmp.Bfy5YgoJCI + rm /tmp/tmp.vAKD3B98Iw /tmp/tmp.Bfy5YgoJCI + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/upgrade-sharded/compare/find-2nd.json /tmp/tmp.HyXxiG9f2c/find-2nd + run_restore backup-minio + local backup_name=backup-minio + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/upgrade-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lDCyTjpvmw ++ mktemp + local LAST_ERR=/tmp/tmp.08oC2Tnon1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lDCyTjpvmw perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.08oC2Tnon1 + rm /tmp/tmp.lDCyTjpvmw /tmp/tmp.08oC2Tnon1 + return 0 + wait_restore backup-minio upgrade-sharded + local backup_name=backup-minio + local cluster_name=upgrade-sharded + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be created.OK Waiting psmdb-restore/restore-backup-minio to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nzbGTzSLub +++ mktemp ++ local LAST_ERR=/tmp/tmp.tp5sHBWdkr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nzbGTzSLub ++ cat /tmp/tmp.tp5sHBWdkr ++ rm /tmp/tmp.nzbGTzSLub /tmp/tmp.tp5sHBWdkr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7b5C4ARQRP +++ mktemp ++ local LAST_ERR=/tmp/tmp.8TmLvnmWCs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7b5C4ARQRP ++ cat /tmp/tmp.8TmLvnmWCs ++ rm /tmp/tmp.7b5C4ARQRP /tmp/tmp.8TmLvnmWCs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AK7P0LSYV9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qzMex2AQ90 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AK7P0LSYV9 ++ cat /tmp/tmp.qzMex2AQ90 ++ rm /tmp/tmp.AK7P0LSYV9 /tmp/tmp.qzMex2AQ90 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PBYrQDqtDN +++ mktemp ++ local LAST_ERR=/tmp/tmp.1lS3LM760x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PBYrQDqtDN ++ cat /tmp/tmp.1lS3LM760x ++ rm /tmp/tmp.PBYrQDqtDN /tmp/tmp.1lS3LM760x ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ewTZYrPRc0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MIuFLLupCr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ 
set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ewTZYrPRc0 ++ cat /tmp/tmp.MIuFLLupCr ++ rm /tmp/tmp.ewTZYrPRc0 /tmp/tmp.MIuFLLupCr ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rCIXw0v4rq +++ mktemp ++ local LAST_ERR=/tmp/tmp.L9FvY7k8ki ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rCIXw0v4rq ++ cat /tmp/tmp.L9FvY7k8ki ++ rm /tmp/tmp.rCIXw0v4rq /tmp/tmp.L9FvY7k8ki ++ return 0 + [[ 3 == \3 ]] + simple_data_check upgrade-sharded 3 1 -mongos + local cluster_name=upgrade-sharded + let last_pod=3-1 + local isSharded=1 + local cluster_pfx=-mongos + '[' 1 -eq 1 ']' + sleep 10 + wait_cluster_consistency upgrade-sharded + local retry=0 ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W2qUJKV9Mz +++ mktemp ++ local LAST_ERR=/tmp/tmp.671o8qQosE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W2qUJKV9Mz ++ cat /tmp/tmp.671o8qQosE ++ rm /tmp/tmp.W2qUJKV9Mz /tmp/tmp.671o8qQosE ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bckYJQdyAs +++ mktemp ++ local LAST_ERR=/tmp/tmp.OFBWIxsg0R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bckYJQdyAs ++ cat /tmp/tmp.OFBWIxsg0R ++ rm /tmp/tmp.bckYJQdyAs /tmp/tmp.OFBWIxsg0R ++ return 0 + [[ 3 == \3 ]] ++ kubectl_bin get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wmgs5txcmM +++ mktemp ++ local LAST_ERR=/tmp/tmp.qBZEnqr0XR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade-sharded -o 'jsonpath={.status.replsets.cfg.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wmgs5txcmM ++ cat /tmp/tmp.qBZEnqr0XR ++ rm /tmp/tmp.wmgs5txcmM /tmp/tmp.qBZEnqr0XR ++ return 0 + [[ 3 == \3 ]] + compare_mongos_cmd find myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local command=find + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; 
s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ajvve2K27s +++ mktemp ++ local LAST_ERR=/tmp/tmp.UsPKGqqZ3A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ajvve2K27s ++ cat /tmp/tmp.UsPKGqqZ3A ++ rm /tmp/tmp.Ajvve2K27s /tmp/tmp.UsPKGqqZ3A ++ return 0 + local client_container=psmdb-client-696897d69b-ngt5t + kubectl_bin exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.wdRX1JBetU ++ mktemp + local LAST_ERR=/tmp/tmp.mVdSNPxHh3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ngt5t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-sharded-mongos.upgrade-sharded-715.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wdRX1JBetU + cat /tmp/tmp.mVdSNPxHh3 + rm /tmp/tmp.wdRX1JBetU /tmp/tmp.mVdSNPxHh3 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/e2e-tests/upgrade-sharded/compare/find.json /tmp/tmp.HyXxiG9f2c/find + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + destroy upgrade-sharded-715 + local namespace=upgrade-sharded-715 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.RD7EtIGIBv +++ mktemp ++ local LAST_ERR=/tmp/tmp.RomV2QsyAd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RD7EtIGIBv ++ cat /tmp/tmp.RomV2QsyAd ++ rm /tmp/tmp.RD7EtIGIBv /tmp/tmp.RomV2QsyAd ++ return 0 + '[' 1 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.yXlELmDazp ++ mktemp + local LAST_ERR=/tmp/tmp.EkDekvM4BE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yXlELmDazp NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-minio upgrade-sharded minio s3://operator-testing/2025-12-17T15:46:08Z logical 89.92KB ready 10m 10m + cat 
/tmp/tmp.EkDekvM4BE + rm /tmp/tmp.yXlELmDazp /tmp/tmp.EkDekvM4BE + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.mcJ30Lkkl8 ++ mktemp + local LAST_ERR=/tmp/tmp.8A5fVQT5N0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mcJ30Lkkl8 perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted from upgrade-sharded-715 namespace + cat /tmp/tmp.8A5fVQT5N0 + rm /tmp/tmp.mcJ30Lkkl8 /tmp/tmp.8A5fVQT5N0 + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.C9tQZJtWob ++ mktemp + local LAST_ERR=/tmp/tmp.nYygekKivq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C9tQZJtWob customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.nYygekKivq + rm /tmp/tmp.C9tQZJtWob /tmp/tmp.nYygekKivq + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4trcVY0h7r ++ mktemp + local LAST_ERR=/tmp/tmp.J2ScikoMTL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4trcVY0h7r + cat /tmp/tmp.J2ScikoMTL + rm /tmp/tmp.4trcVY0h7r /tmp/tmp.J2ScikoMTL + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh 
--type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.8zCUy1Q5JV ++ mktemp + local LAST_ERR=/tmp/tmp.DTRdEUX2kP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8zCUy1Q5JV + cat /tmp/tmp.DTRdEUX2kP + rm /tmp/tmp.8zCUy1Q5JV /tmp/tmp.DTRdEUX2kP + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n upgrade-sharded-715 upgrade-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/upgrade-sharded patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.p7IqtxNb2r ++ mktemp + local LAST_ERR=/tmp/tmp.b8vU8h2K2h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p7IqtxNb2r customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.b8vU8h2K2h + rm /tmp/tmp.p7IqtxNb2r /tmp/tmp.b8vU8h2K2h + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ZyxhJsDEHy ++ mktemp + local LAST_ERR=/tmp/tmp.fllGhGQvBX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2151/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZyxhJsDEHy clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.fllGhGQvBX + rm /tmp/tmp.ZyxhJsDEHy /tmp/tmp.fllGhGQvBX + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.g67tgrIACS ++ mktemp + local LAST_ERR=/tmp/tmp.NGsePUJnP7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g67tgrIACS namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io 
"clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace service "cert-manager-cainjector" deleted from cert-manager namespace service "cert-manager" deleted from cert-manager namespace service "cert-manager-webhook" deleted from cert-manager namespace deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager" deleted from cert-manager namespace deployment.apps "cert-manager-webhook" deleted from 
cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.NGsePUJnP7 + rm /tmp/tmp.g67tgrIACS /tmp/tmp.NGsePUJnP7 + return 0 + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace upgrade-sharded-715 + rm -rf /tmp/tmp.HyXxiG9f2c + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.8SSwVE2bdy + local LAST_OUT=/tmp/tmp.BE9zkZfcVe ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.wgtGOKAEPd + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.oC8Z4sR1JF + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace upgrade-sharded-715 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
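
The "-n sh" patch errors during the CRD teardown above are worth decoding: once a resource type is already gone, `kubectl get ... --all-namespaces` writes nothing to the pipe, but GNU xargs without -r still runs its command once with no arguments, and with no operands after the script `sh -c` sets $0 to the shell name ("sh" here), producing the bogus `kubectl patch ... -n sh` call that the harness then swallows with `:`. A sketch of that teardown pattern with an `xargs -r` fix applied (a suggested change, not what this run executed):

    # Clear finalizers on any leftover custom resources, then wait for each
    # CRD from deploy/crd.yaml to disappear. xargs -r skips the patch
    # entirely when the resource type no longer exists.
    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
        kubectl get "${crd_name}" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -r -L 1 sh -xc \
                'kubectl patch '"${crd_name}"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
            || :
        kubectl wait --for=delete crd "${crd_name}"
    done

The same `--` trick suggested for grep earlier applies to the YAML document separator: `grep -v -- '---'` matches it literally without the escaped dashes that trigger the stray-backslash warnings seen just before this loop in the log.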