Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/logs/upgrade.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=upgrade + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.22.0 + TARGET_IMAGE=perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f + TARGET_IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod8.0 + TARGET_IMAGE_PMM_CLIENT=percona/pmm-client:2.44.1-1 + TARGET_IMAGE_BACKUP=perconalab/percona-server-mongodb-operator:main-backup ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local pod_name=5923 +++ kubectl_bin -n default run 5923 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.12zcfgeHOl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Xg8YOo9eRG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 5923 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.12zcfgeHOl +++ cat /tmp/tmp.Xg8YOo9eRG +++ rm /tmp/tmp.12zcfgeHOl /tmp/tmp.Xg8YOo9eRG +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/5923 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JTa9uVqTot ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xAYvKHTDC2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/5923 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JTa9uVqTot +++ cat /tmp/tmp.xAYvKHTDC2 +++ rm /tmp/tmp.JTa9uVqTot /tmp/tmp.xAYvKHTDC2 +++ return 0 ++++ kubectl_bin -n default exec 5923 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iPxi5CQS0f +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bqU57a4al8 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 5923 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iPxi5CQS0f ++++ cat /tmp/tmp.bqU57a4al8 ++++ rm /tmp/tmp.iPxi5CQS0f /tmp/tmp.bqU57a4al8 ++++ return 0 +++ local 'output=db version v8.0.17-6 Build Info: { "version": "8.0.17-6", "gitVersion": "22257a4d2e901468cf31d122e1c0ea549b66770e", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/5923 
--grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aMn2s51EC6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.W9JyymzM5c +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/5923 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aMn2s51EC6 +++ cat /tmp/tmp.W9JyymzM5c Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.aMn2s51EC6 /tmp/tmp.W9JyymzM5c +++ return 0 +++ echo db version v8.0.17-6 Build Info: '{' '"version":' '"8.0.17-6",' '"gitVersion":' '"22257a4d2e901468cf31d122e1c0ea549b66770e",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.17-6 ++ [[ ! 8.0.17-6 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.17-6 + FULL_VER=8.0.17-6 + MONGO_VER=8.0 ++ jq -r '.versions[].operator' ++ curl -s https://check.percona.com/versions/v1/psmdb-operator ++ sort -V ++ tail -n1 + INIT_OPERATOR_VER=1.21.2 + [[ 1.21.2 == \1\.\2\2\.\0 ]] + GIT_TAG=v1.21.2 + case $(curl -s -o /dev/null -w "%{http_code}" 'https://check.percona.com/versions/v1/psmdb-operator/'${INIT_OPERATOR_VER}'/latest?databaseVersion='${MONGO_VER}'') in ++ curl -s -o /dev/null -w '%{http_code}' 'https://check.percona.com/versions/v1/psmdb-operator/1.21.2/latest?databaseVersion=8.0' ++ curl -s 'https://check.percona.com/versions/v1/psmdb-operator/1.21.2/latest?databaseVersion=8.0' + 
INIT_OPERATOR_IMAGES='{"versions":[{"product":"psmdb-operator","operator":"1.21.2","matrix":{"mongod":{"8.0.17-6":{"imagePath":"percona/percona-server-mongodb:8.0.17-6","imageHash":"ae6380469f6b73d3517ec4eae7b2f12ff6310dc2deae8e52fe514276c45e9440","imageHashArm64":"f1170f8bf68d051816cd4d956ca1f6ee9885c6cf0e1e5db5dc00a137af3603ee","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"390bfd12f981e8b3890550c4927a3ece071377065e001894458047602c744e3b","status":"available","critical":false},"3.5.0":{"imagePath":"percona/pmm-client:3.5.0","imageHash":"352aee74f25b3c1c4cd9dff1f378a0c3940b315e551d170c09953bf168531e4a","imageHashArm64":"cbbb074d51d90a5f2d6f1d98a05024f6de2ffdcb5acab632324cea4349a820bd","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.2":{"imagePath":"percona/percona-server-mongodb-operator:1.21.2","imageHash":"76d59626914f4d18eb0c19d8e31d2480f7a358daa3ded777cafb7e3717c7508d","imageHashArm64":"b6adecc41de81f69a4faf552aeca31c06411f012378be248ead70a538c8ea365","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + OPERATOR_NAME=percona-server-mongodb-operator ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.2","matrix":{"mongod":{"8.0.17-6":{"imagePath":"percona/percona-server-mongodb:8.0.17-6","imageHash":"ae6380469f6b73d3517ec4eae7b2f12ff6310dc2deae8e52fe514276c45e9440","imageHashArm64":"f1170f8bf68d051816cd4d956ca1f6ee9885c6cf0e1e5db5dc00a137af3603ee","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"390bfd12f981e8b3890550c4927a3ece071377065e001894458047602c744e3b","status":"available","critical":false},"3.5.0":{"imagePath":"percona/pmm-client:3.5.0","imageHash":"352aee74f25b3c1c4cd9dff1f378a0c3940b315e551d170c09953bf168531e4a","imageHashArm64":"cbbb074d51d90a5f2d6f1d98a05024f6de2ffdcb5acab632324cea4349a820bd","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.2":{"imagePath":"percona/percona-server-mongodb-operator:1.21.2","imageHash":"76d59626914f4d18eb0c19d8e31d2480f7a358daa3ded777cafb7e3717c7508d","imageHashArm64":"b6adecc41de81f69a4faf552aeca31c06411f012378be248ead70a538c8ea365","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-server-mongodb-operator:1.21.2 ++ echo perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-server-mongodb-operator:1.21.2 ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.2","matrix":{"mongod":{"8.0.17-6":{"imagePath":"percona/percona-server-mongodb:8.0.17-6","imageHash":"ae6380469f6b73d3517ec4eae7b2f12ff6310dc2deae8e52fe514276c45e9440","imageHashArm64":"f1170f8bf68d051816cd4d956ca1f6ee9885c6cf0e1e5db5dc00a137af3603ee","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"390bfd12f981e8b3890550c4927a3ece071377065e001894458047602c744e3b","status":"available","critical":false},"3.5.0":{"imagePath":"percona/pmm-client:3.5.0","imageHash":"352aee74f25b3c1c4cd9dff1f378a0c3940b315e551d170c09953bf168531e4a","imageHashArm64":"cbbb074d51d90a5f2d6f1d98a05024f6de2ffdcb5acab632324cea4349a820bd","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.2":{"imagePath":"percona/percona-server-mongodb-operator:1.21.2","imageHash":"76d59626914f4d18eb0c19d8e31d2480f7a358daa3ded777cafb7e3717c7508d","imageHashArm64":"b6adecc41de81f69a4faf552aeca31c06411f012378be248ead70a538c8ea365","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.mongod[].imagePath' + IMAGE_MONGOD=percona/percona-server-mongodb:8.0.17-6 ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.2","matrix":{"mongod":{"8.0.17-6":{"imagePath":"percona/percona-server-mongodb:8.0.17-6","imageHash":"ae6380469f6b73d3517ec4eae7b2f12ff6310dc2deae8e52fe514276c45e9440","imageHashArm64":"f1170f8bf68d051816cd4d956ca1f6ee9885c6cf0e1e5db5dc00a137af3603ee","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"390bfd12f981e8b3890550c4927a3ece071377065e001894458047602c744e3b","status":"available","critical":false},"3.5.0":{"imagePath":"percona/pmm-client:3.5.0","imageHash":"352aee74f25b3c1c4cd9dff1f378a0c3940b315e551d170c09953bf168531e4a","imageHashArm64":"cbbb074d51d90a5f2d6f1d98a05024f6de2ffdcb5acab632324cea4349a820bd","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.2":{"imagePath":"percona/percona-server-mongodb-operator:1.21.2","imageHash":"76d59626914f4d18eb0c19d8e31d2480f7a358daa3ded777cafb7e3717c7508d","imageHashArm64":"b6adecc41de81f69a4faf552aeca31c06411f012378be248ead70a538c8ea365","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.pmm[].imagePath' + IMAGE_PMM_CLIENT='percona/pmm-client:2.44.1-1 percona/pmm-client:3.5.0' ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.21.2","matrix":{"mongod":{"8.0.17-6":{"imagePath":"percona/percona-server-mongodb:8.0.17-6","imageHash":"ae6380469f6b73d3517ec4eae7b2f12ff6310dc2deae8e52fe514276c45e9440","imageHashArm64":"f1170f8bf68d051816cd4d956ca1f6ee9885c6cf0e1e5db5dc00a137af3603ee","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3","imageHashArm64":"390bfd12f981e8b3890550c4927a3ece071377065e001894458047602c744e3b","status":"available","critical":false},"3.5.0":{"imagePath":"percona/pmm-client:3.5.0","imageHash":"352aee74f25b3c1c4cd9dff1f378a0c3940b315e551d170c09953bf168531e4a","imageHashArm64":"cbbb074d51d90a5f2d6f1d98a05024f6de2ffdcb5acab632324cea4349a820bd","status":"recommended","critical":false}},"proxysql":{},"haproxy":{},"backup":{"2.11.0":{"imagePath":"percona/percona-backup-mongodb:2.11.0","imageHash":"d09f5de92cfbc5a7a42a8cc86742a07481c98b3b42cffdc6359b3ec1f63de3a5","imageHashArm64":"a60d095439537b982209582d428b3b39a01e31e88b2b62d2dcbd99ea4e2d9928","status":"recommended","critical":false}},"operator":{"1.21.2":{"imagePath":"percona/percona-server-mongodb-operator:1.21.2","imageHash":"76d59626914f4d18eb0c19d8e31d2480f7a358daa3ded777cafb7e3717c7508d","imageHashArm64":"b6adecc41de81f69a4faf552aeca31c06411f012378be248ead70a538c8ea365","status":"recommended","critical":false}},"logCollector":{"4.0.1":{"imagePath":"percona/fluentbit:4.0.1","imageHash":"a4ab7dd10379ccf74607f6b05225c4996eeff53b628bda94e615781a1f58b779","imageHashArm64":"","status":"recommended","critical":false}},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.backup[].imagePath' + IMAGE_BACKUP=percona/percona-backup-mongodb:2.11.0 + [[ 1.22.0 == \1\.\2\1\.\2 ]] + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra_gh upgrade-13274 v1.21.2 + local ns=upgrade-13274 + local git_tag=v1.21.2 + check_crd_for_deletion v1.21.2 + local git_tag=v1.21.2 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j0G9dg6ahY +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ZCBq3hTMd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j0G9dg6ahY ++ cat /tmp/tmp.0ZCBq3hTMd ++ rm /tmp/tmp.j0G9dg6ahY /tmp/tmp.0ZCBq3hTMd ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 
's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKklihAWfc +++ mktemp ++ local LAST_ERR=/tmp/tmp.QbJsys187F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eKklihAWfc ++ cat /tmp/tmp.QbJsys187F ++ rm /tmp/tmp.eKklihAWfc /tmp/tmp.QbJsys187F ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w4OY1YYrCl +++ mktemp ++ local LAST_ERR=/tmp/tmp.8WKoSUeDrP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w4OY1YYrCl ++ cat /tmp/tmp.8WKoSUeDrP ++ rm /tmp/tmp.w4OY1YYrCl /tmp/tmp.8WKoSUeDrP ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v 
'^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- ++ mktemp + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.JalWRvzP13 ++ mktemp + local LAST_OUT=/tmp/tmp.bn0nvdNydO + local LAST_ERR=/tmp/tmp.zudBnHTRpO + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.TeBWtWmQk5 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JalWRvzP13 + cat /tmp/tmp.zudBnHTRpO + rm /tmp/tmp.JalWRvzP13 /tmp/tmp.zudBnHTRpO + return 0 namespace "upgrade-19796" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bn0nvdNydO namespace "psmdb-operator" deleted + cat /tmp/tmp.TeBWtWmQk5 + rm /tmp/tmp.bn0nvdNydO /tmp/tmp.TeBWtWmQk5 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.f0LHHveyMA ++ mktemp + local LAST_ERR=/tmp/tmp.W27LSHUcwC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f0LHHveyMA + cat /tmp/tmp.W27LSHUcwC + rm /tmp/tmp.f0LHHveyMA /tmp/tmp.W27LSHUcwC + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2XiGuHwevn ++ mktemp + local LAST_ERR=/tmp/tmp.qKfMH8QQTg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2XiGuHwevn namespace/psmdb-operator created + cat /tmp/tmp.qKfMH8QQTg + rm /tmp/tmp.2XiGuHwevn /tmp/tmp.qKfMH8QQTg + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.m9AdQ6rjHF +++ mktemp ++ local LAST_ERR=/tmp/tmp.QfrZVEV3ND ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m9AdQ6rjHF ++ cat /tmp/tmp.QfrZVEV3ND ++ rm /tmp/tmp.m9AdQ6rjHF /tmp/tmp.QfrZVEV3ND ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster5 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.doPTBNvtI1 ++ mktemp + local LAST_ERR=/tmp/tmp.GjDynM4yRG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster5 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.doPTBNvtI1 Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster5" modified. + cat /tmp/tmp.GjDynM4yRG + rm /tmp/tmp.doPTBNvtI1 /tmp/tmp.GjDynM4yRG + return 0 + deploy_operator_gh v1.21.2 + local git_tag=v1.21.2 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/crd.yaml --server-side ++ mktemp + local LAST_OUT=/tmp/tmp.p8tuTxGBkx ++ mktemp + local LAST_ERR=/tmp/tmp.RfT2dqiAh9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/crd.yaml --server-side + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p8tuTxGBkx customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.RfT2dqiAh9 + rm /tmp/tmp.p8tuTxGBkx /tmp/tmp.RfT2dqiAh9 + return 0 + local rbac_yaml=rbac + local operator_yaml=operator + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.0AZd19Hq6v ++ mktemp + local LAST_ERR=/tmp/tmp.avZNw9ReuO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/cw-rbac.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0AZd19Hq6v clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.avZNw9ReuO + rm /tmp/tmp.0AZd19Hq6v /tmp/tmp.avZNw9ReuO + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:1.21.2") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /tmp/tmp.AXerBqiwXt/cw-operator_v1.21.2.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.GI53n3ZKvY ++ mktemp + local LAST_ERR=/tmp/tmp.DOzT55QwJ0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GI53n3ZKvY deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.DOzT55QwJ0 + rm /tmp/tmp.GI53n3ZKvY /tmp/tmp.DOzT55QwJ0 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.V3JhpBBQss +++ mktemp ++ local LAST_ERR=/tmp/tmp.o48y3Nkp6t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V3JhpBBQss ++ cat /tmp/tmp.o48y3Nkp6t ++ rm /tmp/tmp.V3JhpBBQss /tmp/tmp.o48y3Nkp6t ++ return 0 + wait_operator_pod percona-server-mongodb-operator-6dfc5bbc75-jnffj + local pod=percona-server-mongodb-operator-6dfc5bbc75-jnffj + set +o xtrace waiting for pod/percona-server-mongodb-operator-6dfc5bbc75-jnffj to be ready..OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.pel7aIdHl2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1tO2xXvUru ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pel7aIdHl2 ++ cat /tmp/tmp.1tO2xXvUru ++ rm /tmp/tmp.pel7aIdHl2 /tmp/tmp.1tO2xXvUru ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-6dfc5bbc75-jnffj ++ mktemp + local LAST_OUT=/tmp/tmp.PWWDa1Fmkr ++ mktemp + local LAST_ERR=/tmp/tmp.ujnqdIV73G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-6dfc5bbc75-jnffj + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PWWDa1Fmkr + cat /tmp/tmp.ujnqdIV73G + rm /tmp/tmp.PWWDa1Fmkr /tmp/tmp.ujnqdIV73G + return 0 2026-01-21T14:20:28.858Z INFO setup Manager starting up {"gitCommit": "320a5bc58b6c6b01450b8a284f5021948c3ad044", "gitBranch": "release-1-21-2", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace upgrade-13274 + local namespace=upgrade-13274 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) 
were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-13274' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-13274 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-13274 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.4PF5wiwivJ ++ mktemp + local LAST_OUT=/tmp/tmp.RA7mbHsAyK ++ mktemp + local LAST_ERR=/tmp/tmp.Pc4jvpRZQK + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.NQZGMJ0C8w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace upgrade-13274 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4PF5wiwivJ + cat /tmp/tmp.Pc4jvpRZQK + rm /tmp/tmp.4PF5wiwivJ /tmp/tmp.Pc4jvpRZQK + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RA7mbHsAyK + cat /tmp/tmp.NQZGMJ0C8w + rm /tmp/tmp.RA7mbHsAyK /tmp/tmp.NQZGMJ0C8w + return 0 + kubectl_bin wait --for=delete namespace upgrade-13274 ++ mktemp + local LAST_OUT=/tmp/tmp.TbaKkds9M6 ++ mktemp + local LAST_ERR=/tmp/tmp.yIql8Xc3O7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace upgrade-13274 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TbaKkds9M6 + cat /tmp/tmp.yIql8Xc3O7 + rm /tmp/tmp.TbaKkds9M6 /tmp/tmp.yIql8Xc3O7 + return 0 + desc 'create namespace upgrade-13274' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-13274 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-13274 ++ mktemp + local LAST_OUT=/tmp/tmp.Fx1kxOqQVj ++ mktemp + local LAST_ERR=/tmp/tmp.GCu3faoZJY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + 
kubectl create namespace upgrade-13274 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fx1kxOqQVj namespace/upgrade-13274 created + cat /tmp/tmp.GCu3faoZJY + rm /tmp/tmp.Fx1kxOqQVj /tmp/tmp.GCu3faoZJY + return 0 + set_kube_ctx upgrade-13274 + local namespace=upgrade-13274 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.NVJHrR8OAU +++ mktemp ++ local LAST_ERR=/tmp/tmp.twy5elLnt4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NVJHrR8OAU ++ cat /tmp/tmp.twy5elLnt4 ++ rm /tmp/tmp.NVJHrR8OAU /tmp/tmp.twy5elLnt4 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster5 --namespace=upgrade-13274 ++ mktemp + local LAST_OUT=/tmp/tmp.bHKoGmIP88 ++ mktemp + local LAST_ERR=/tmp/tmp.wbLOiTfqCy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster5 --namespace=upgrade-13274 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bHKoGmIP88 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster5" modified. + cat /tmp/tmp.wbLOiTfqCy + rm /tmp/tmp.bHKoGmIP88 /tmp/tmp.wbLOiTfqCy + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Iwcj0mV6ku ++ mktemp + local LAST_ERR=/tmp/tmp.R0UmnxpCyA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Iwcj0mV6ku secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.R0UmnxpCyA + rm /tmp/tmp.Iwcj0mV6ku /tmp/tmp.R0UmnxpCyA + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 
'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Wed Jan 21 14:20:51 2026 NAMESPACE: upgrade-13274 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-13274.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-13274 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-13274 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-13274 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-13274 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UjmKl9sVqI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ol7oYnJgWr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UjmKl9sVqI ++ cat /tmp/tmp.ol7oYnJgWr ++ rm /tmp/tmp.UjmKl9sVqI /tmp/tmp.ol7oYnJgWr ++ return 0 + local MINIO_POD=minio-service-549fcd79c5-2kd8c + wait_pod minio-service-549fcd79c5-2kd8c + local pod=minio-service-549fcd79c5-2kd8c + set +o xtrace waiting for pod/minio-service-549fcd79c5-2kd8c to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-13274.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.cQXGmvLyX7 ++ mktemp + local LAST_ERR=/tmp/tmp.fVUVMr2XrW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cQXGmvLyX7 pod "aws-cli" deleted from upgrade-13274 namespace + cat /tmp/tmp.fVUVMr2XrW All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
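For reference, the bucket bootstrap above amounts to a throwaway AWS CLI pod pointed at the in-cluster MinIO service. A minimal standalone equivalent, with the endpoint, credentials and bucket name taken from the trace (the --rm flag removes the helper pod once the command exits):

# one-shot pod that creates the s3://operator-testing bucket on the MinIO service
kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
  bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'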
+ rm /tmp/tmp.cQXGmvLyX7 /tmp/tmp.fVUVMr2XrW + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/secrets.yaml + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/client.yml -f /tmp/tmp.AXerBqiwXt/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.NwTpx73lDU ++ mktemp + local LAST_ERR=/tmp/tmp.dY55hGFGJV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/client.yml -f /tmp/tmp.AXerBqiwXt/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NwTpx73lDU deployment.apps/psmdb-client created secret/my-cluster-name-secrets created + cat /tmp/tmp.dY55hGFGJV + rm /tmp/tmp.NwTpx73lDU /tmp/tmp.dY55hGFGJV + return 0 + local cr_yaml=/tmp/tmp.AXerBqiwXt/cr_v1.21.2.yaml + prepare_cr_yaml /tmp/tmp.AXerBqiwXt/cr_v1.21.2.yaml + local cr_yaml=/tmp/tmp.AXerBqiwXt/cr_v1.21.2.yaml + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.2/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade" | .spec.upgradeOptions.apply = "disabled" | .spec.replsets[].size = 3 | .spec.replsets[].arbiter.enabled = false | .spec.backup.enabled = true | .spec.backup.pitr.enabled = false | .spec.backup.storages.minio.type = "s3" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service:9000/" | .spec.sharding.enabled = false | .spec.image="" | .spec.image tag="!!null" | .spec.backup.image = "-backup" | .spec.pmm.image = "-pmm"' + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + apply_cluster /tmp/tmp.AXerBqiwXt/cr_v1.21.2.yaml + '[' -z '' ']' + cat_config /tmp/tmp.AXerBqiwXt/cr_v1.21.2.yaml + kubectl_bin apply -f - + cat /tmp/tmp.AXerBqiwXt/cr_v1.21.2.yaml + yq eval '(.spec | select(.image == null)).image = "percona/percona-server-mongodb:8.0.17-6"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1 percona/pmm-client:3.5.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:1.21.2"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.11.0"' + yq eval '.spec.upgradeOptions.apply="Never"' + /usr/sbin/sed -e s/NAME_SPACE/upgrade-13274/g ++ mktemp + local LAST_OUT=/tmp/tmp.dFzOX1Ms5G ++ mktemp + local LAST_ERR=/tmp/tmp.ZEE981YaSa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dFzOX1Ms5G perconaservermongodb.psmdb.percona.com/upgrade created + cat /tmp/tmp.ZEE981YaSa + rm /tmp/tmp.dFzOX1Ms5G /tmp/tmp.ZEE981YaSa + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started 
----------------------------------------------------------------------------------- + wait_for_running upgrade-rs0 3 + local name=upgrade-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-rs0-0 + local pod=upgrade-rs0-0 + set +o xtrace waiting for pod/upgrade-rs0-0 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-rs0-1 + local pod=upgrade-rs0-1 + set +o xtrace waiting for pod/upgrade-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QRSGVMT1V3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eA2zpIpWuo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QRSGVMT1V3 ++ cat /tmp/tmp.eA2zpIpWuo ++ rm /tmp/tmp.QRSGVMT1V3 /tmp/tmp.eA2zpIpWuo ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-rs0-2 + local pod=upgrade-rs0-2 + set +o xtrace waiting for pod/upgrade-rs0-2 to be ready........OK ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HsdRNhqeni +++ mktemp ++ local LAST_ERR=/tmp/tmp.xI6JIMtyWu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HsdRNhqeni ++ cat /tmp/tmp.xI6JIMtyWu ++ rm /tmp/tmp.HsdRNhqeni /tmp/tmp.xI6JIMtyWu ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Pdv4BjXfR +++ mktemp ++ local LAST_ERR=/tmp/tmp.elayeTAsi0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2Pdv4BjXfR ++ cat /tmp/tmp.elayeTAsi0 ++ rm /tmp/tmp.2Pdv4BjXfR /tmp/tmp.elayeTAsi0 ++ return 0 + [[ false == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness. 
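The cluster readiness wait itself runs under set +x, so only the progress dots appear above. Roughly, it waits for every replica set member pod and then polls the custom resource until the operator reports it ready; a simplified sketch of that loop, assuming the psmdb resource's .status.state field and reusing the pod and namespace names from this run (timeouts and error handling omitted):

# wait for each rs0 member pod, then poll the PSMDB CR until the operator marks it ready
for i in 0 1 2; do
  kubectl -n upgrade-13274 wait --for=condition=Ready pod/upgrade-rs0-$i --timeout=600s
done
until [[ "$(kubectl -n upgrade-13274 get psmdb upgrade -o jsonpath='{.status.state}')" == "ready" ]]; do
  sleep 5
done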
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@upgrade-rs0.upgrade-13274 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@upgrade-rs0.upgrade-13274 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@upgrade-rs0.upgrade-13274 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EYyo6D8zyE +++ mktemp ++ local LAST_ERR=/tmp/tmp.FMjXE3tOTt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EYyo6D8zyE ++ cat /tmp/tmp.FMjXE3tOTt ++ rm /tmp/tmp.EYyo6D8zyE /tmp/tmp.FMjXE3tOTt ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UVz4qq9gSt ++ mktemp + local LAST_ERR=/tmp/tmp.5YkcLFDTuw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UVz4qq9gSt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("de78ba3e-d652-46b8-ad81-4d44ae838351") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.5YkcLFDTuw + rm /tmp/tmp.UVz4qq9gSt /tmp/tmp.5YkcLFDTuw + return 0 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@upgrade-rs0.upgrade-13274 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@upgrade-rs0.upgrade-13274 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0.upgrade-13274 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GDAVvC2VLi +++ mktemp ++ local LAST_ERR=/tmp/tmp.kPkJlqTtAR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GDAVvC2VLi ++ cat /tmp/tmp.kPkJlqTtAR ++ rm /tmp/tmp.GDAVvC2VLi /tmp/tmp.kPkJlqTtAR ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5MDmzpzpZr ++ mktemp + local LAST_ERR=/tmp/tmp.xbqgn5do14 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5MDmzpzpZr Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("38fea3b5-1452-4014-bc19-1054b7782644") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.xbqgn5do14 + rm /tmp/tmp.5MDmzpzpZr /tmp/tmp.xbqgn5do14 + return 0 + desc 'check if cr and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if cr and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_generation 1 statefulset upgrade-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uDO870K2NB +++ mktemp ++ local LAST_ERR=/tmp/tmp.uEWxVISF2A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uDO870K2NB ++ cat /tmp/tmp.uEWxVISF2A ++ rm /tmp/tmp.uDO870K2NB /tmp/tmp.uEWxVISF2A ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade + local generation=1 + local resource=psmdb + local name=upgrade + local current_generation ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qyhd9v3xiO +++ mktemp ++ local LAST_ERR=/tmp/tmp.LnGntw2NBf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qyhd9v3xiO ++ cat /tmp/tmp.LnGntw2NBf ++ rm /tmp/tmp.Qyhd9v3xiO 
/tmp/tmp.LnGntw2NBf ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'create backup ' + set +o xtrace ----------------------------------------------------------------------------------- create backup ----------------------------------------------------------------------------------- + backup_name_minio=backup-minio + run_backup minio + local storage=minio + local backup_name=backup-minio + local type=logical + log 'running backup backup-minio' + set +o xtrace [2026-01-21T14:23:39+0000] running backup backup-minio + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.1CY8E6hdue ++ mktemp + local LAST_ERR=/tmp/tmp.hKf9csbuLY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1CY8E6hdue perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.hKf9csbuLY + rm /tmp/tmp.1CY8E6hdue /tmp/tmp.hKf9csbuLY + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state.................................................................OK + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.8kDGmwGp17 ++ mktemp + local LAST_ERR=/tmp/tmp.AP4XCfNHLv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8kDGmwGp17 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.AP4XCfNHLv + rm /tmp/tmp.8kDGmwGp17 /tmp/tmp.AP4XCfNHLv + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aSlz4PvXwN ++ mktemp + local LAST_ERR=/tmp/tmp.3pMAL8VEYJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aSlz4PvXwN clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator unchanged clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.3pMAL8VEYJ + rm /tmp/tmp.aSlz4PvXwN /tmp/tmp.3pMAL8VEYJ + return 0 + kubectl_bin patch deployment percona-server-mongodb-operator 
'-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f"}]}}}}' -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sqIKbChBKb ++ mktemp + local LAST_ERR=/tmp/tmp.A0wZFbKNmN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch deployment percona-server-mongodb-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f"}]}}}}' -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sqIKbChBKb deployment.apps/percona-server-mongodb-operator patched + cat /tmp/tmp.A0wZFbKNmN + rm /tmp/tmp.sqIKbChBKb /tmp/tmp.A0wZFbKNmN + return 0 + kubectl_bin rollout status deployment/percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bqEYH7wUPD ++ mktemp + local LAST_ERR=/tmp/tmp.xYKiDBLCke + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl rollout status deployment/percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bqEYH7wUPD Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... deployment "percona-server-mongodb-operator" successfully rolled out + cat /tmp/tmp.xYKiDBLCke + rm /tmp/tmp.bqEYH7wUPD /tmp/tmp.xYKiDBLCke + return 0 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- ++ kubectl_bin get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OvJvPrQYBF +++ mktemp ++ local LAST_ERR=/tmp/tmp.2JQ1MuBuAz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OvJvPrQYBF ++ cat /tmp/tmp.2JQ1MuBuAz ++ rm /tmp/tmp.OvJvPrQYBF /tmp/tmp.2JQ1MuBuAz ++ return 0 + [[ 1 -eq 1 ]] + sleep 10 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-rs0 3 + local name=upgrade-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-rs0-0 + local pod=upgrade-rs0-0 + set +o xtrace waiting for pod/upgrade-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-rs0-1 + local pod=upgrade-rs0-1 + set +o xtrace waiting for pod/upgrade-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OtSzNPlWnr +++ mktemp ++ local LAST_ERR=/tmp/tmp.cg8yYy0Mgt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OtSzNPlWnr ++ cat /tmp/tmp.cg8yYy0Mgt ++ rm /tmp/tmp.OtSzNPlWnr /tmp/tmp.cg8yYy0Mgt ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-rs0-2 + local pod=upgrade-rs0-2 + set +o xtrace waiting for pod/upgrade-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DHvIJ5CFJH +++ mktemp ++ local LAST_ERR=/tmp/tmp.U5ysb20Pxb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DHvIJ5CFJH ++ cat /tmp/tmp.U5ysb20Pxb ++ rm /tmp/tmp.DHvIJ5CFJH /tmp/tmp.U5ysb20Pxb ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ttGDAPTn2O +++ mktemp ++ local LAST_ERR=/tmp/tmp.aL5R93bKWN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ttGDAPTn2O ++ cat /tmp/tmp.aL5R93bKWN ++ rm /tmp/tmp.ttGDAPTn2O /tmp/tmp.aL5R93bKWN ++ return 0 + [[ false == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + check_applied_images operator + local updated_image=operator + case "${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lZ74dQjUMK +++ mktemp ++ local LAST_ERR=/tmp/tmp.rTjlbucGVF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lZ74dQjUMK ++ cat /tmp/tmp.rTjlbucGVF ++ rm /tmp/tmp.lZ74dQjUMK /tmp/tmp.rTjlbucGVF ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f == perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q3ouneuzYK +++ mktemp ++ local LAST_ERR=/tmp/tmp.9BhStsTUJ5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q3ouneuzYK ++ cat /tmp/tmp.9BhStsTUJ5 ++ rm /tmp/tmp.q3ouneuzYK /tmp/tmp.9BhStsTUJ5 ++ return 0 + [[ percona/percona-backup-mongodb:2.11.0 == percona/percona-backup-mongodb:2.11.0 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gVTKQk8gx2 +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.OGHCpwAz9r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gVTKQk8gx2 ++ cat /tmp/tmp.OGHCpwAz9r ++ rm /tmp/tmp.gVTKQk8gx2 /tmp/tmp.OGHCpwAz9r ++ return 0 + [[ percona/pmm-client:2.44.1-1 percona/pmm-client:3.5.0 == percona/pmm-client:2.44.1-1 percona/pmm-client:3.5.0 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WdyuKPoDTT +++ mktemp ++ local LAST_ERR=/tmp/tmp.oXWmhuWKjE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WdyuKPoDTT ++ cat /tmp/tmp.oXWmhuWKjE ++ rm /tmp/tmp.WdyuKPoDTT /tmp/tmp.oXWmhuWKjE ++ return 0 + [[ percona/percona-server-mongodb:8.0.17-6 == percona/percona-server-mongodb:8.0.17-6 ]] + : Operator image has been updated correctly + compare_generation 1 statefulset upgrade-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RK9aPD9iae +++ mktemp ++ local LAST_ERR=/tmp/tmp.nXhkR1210m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RK9aPD9iae ++ cat /tmp/tmp.nXhkR1210m ++ rm /tmp/tmp.RK9aPD9iae /tmp/tmp.nXhkR1210m ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade + local generation=1 + local resource=psmdb + local name=upgrade + local current_generation ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MBwiQuDJsJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.9gI5foGQ5s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MBwiQuDJsJ ++ cat /tmp/tmp.9gI5foGQ5s ++ rm /tmp/tmp.MBwiQuDJsJ /tmp/tmp.9gI5foGQ5s ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch psmdb images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch psmdb images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb upgrade --type=merge --patch '{ "spec": { "crVersion": "1.22.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod8.0", "pmm": { "image": "percona/pmm-client:2.44.1-1" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.OMpRsy4WCh ++ mktemp + local LAST_ERR=/tmp/tmp.kEuipWs8GL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb upgrade --type=merge --patch '{ "spec": { "crVersion": "1.22.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod8.0", "pmm": { "image": "percona/pmm-client:2.44.1-1" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.OMpRsy4WCh perconaservermongodb.psmdb.percona.com/upgrade patched + cat /tmp/tmp.kEuipWs8GL + rm /tmp/tmp.OMpRsy4WCh /tmp/tmp.kEuipWs8GL + return 0 + sleep 10 + desc 'check cluster after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check cluster after full upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-rs0 3 + local name=upgrade-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-rs0-0 + local pod=upgrade-rs0-0 + set +o xtrace waiting for pod/upgrade-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-rs0-1 + local pod=upgrade-rs0-1 + set +o xtrace waiting for pod/upgrade-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VRRyU2LW4K +++ mktemp ++ local LAST_ERR=/tmp/tmp.oJ9bkOOCeb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VRRyU2LW4K ++ cat /tmp/tmp.oJ9bkOOCeb ++ rm /tmp/tmp.VRRyU2LW4K /tmp/tmp.oJ9bkOOCeb ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-rs0-2 + local pod=upgrade-rs0-2 + set +o xtrace waiting for pod/upgrade-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9I3BBOAaV4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.x3pGhLVIW3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9I3BBOAaV4 ++ cat /tmp/tmp.x3pGhLVIW3 ++ rm /tmp/tmp.9I3BBOAaV4 /tmp/tmp.x3pGhLVIW3 ++ return 0 + [[ false == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MQrQLBw7ol +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cy9QrlNxF7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MQrQLBw7ol ++ cat /tmp/tmp.Cy9QrlNxF7 ++ rm /tmp/tmp.MQrQLBw7ol /tmp/tmp.Cy9QrlNxF7 ++ return 0 + [[ false == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................... 
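For reference, the upgrade exercised in the trace above boils down to the following command sequence (a minimal sketch reconstructed from this run; the psmdb-operator namespace, deployment name, image tags, and deploy/ paths are the ones the test uses, with paths shown relative to the operator source tree):

    # install the target release CRDs and refresh cluster-wide RBAC
    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
    kubectl apply -f deploy/cw-rbac.yaml -n psmdb-operator
    # switch the operator Deployment to the target image and wait for the rollout
    kubectl -n psmdb-operator patch deployment percona-server-mongodb-operator \
      -p '{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f"}]}}}}'
    kubectl -n psmdb-operator rollout status deployment/percona-server-mongodb-operator
    # only then bump crVersion and the database/PMM/backup images on the custom resource
    kubectl patch psmdb upgrade --type=merge --patch '{"spec":{"crVersion":"1.22.0","image":"perconalab/percona-server-mongodb-operator:main-mongod8.0","pmm":{"image":"percona/pmm-client:2.44.1-1"},"backup":{"image":"perconalab/percona-server-mongodb-operator:main-backup"}}}'

The ordering matters in this test: the operator Deployment is rolled first, and the PerconaServerMongoDB resource is patched only afterwards, so the new operator is the one reconciling the crVersion/image change. The compare_generation checks bracket this — the statefulset and psmdb generations stay at 1 after the operator-only upgrade and move to 2 only once the CR patch triggers a new rollout.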
+ wait_cluster_consistency upgrade + local retry=0 ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iKJlEhxo7x +++ mktemp ++ local LAST_ERR=/tmp/tmp.FPJ6t1nHJ8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iKJlEhxo7x ++ cat /tmp/tmp.FPJ6t1nHJ8 ++ rm /tmp/tmp.iKJlEhxo7x /tmp/tmp.FPJ6t1nHJ8 ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GMVtsaA90r +++ mktemp ++ local LAST_ERR=/tmp/tmp.kfmeFVXdeQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GMVtsaA90r ++ cat /tmp/tmp.kfmeFVXdeQ ++ rm /tmp/tmp.GMVtsaA90r /tmp/tmp.kfmeFVXdeQ ++ return 0 + [[ 3 == \3 ]] + simple_data_check upgrade-rs0 3 + local cluster_name=upgrade-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 + local command=find + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:28:25+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sjX9NtdVf7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Np4ktaKtWP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sjX9NtdVf7 ++ cat /tmp/tmp.Np4ktaKtWP ++ rm /tmp/tmp.sjX9NtdVf7 /tmp/tmp.Np4ktaKtWP ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.x0uO2frYKl ++ mktemp + local LAST_ERR=/tmp/tmp.yNVK8rQ8ix + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- 
bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x0uO2frYKl + cat /tmp/tmp.yNVK8rQ8ix + rm /tmp/tmp.x0uO2frYKl /tmp/tmp.yNVK8rQ8ix + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find.json /tmp/tmp.AXerBqiwXt/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 + local command=find + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:28:27+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 mongodb '' '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kdRv8I6Jsk +++ mktemp ++ local LAST_ERR=/tmp/tmp.aN0zKi3f5M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kdRv8I6Jsk ++ cat /tmp/tmp.aN0zKi3f5M ++ rm /tmp/tmp.kdRv8I6Jsk /tmp/tmp.aN0zKi3f5M ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tAbMnnbsVZ ++ mktemp + local LAST_ERR=/tmp/tmp.vJ8sFXqqcX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tAbMnnbsVZ + cat /tmp/tmp.vJ8sFXqqcX + rm /tmp/tmp.tAbMnnbsVZ /tmp/tmp.vJ8sFXqqcX + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find.json /tmp/tmp.AXerBqiwXt/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 + local command=find + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 + local postfix= + local suffix= + local database=myApp + local collection=test + 
local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:28:30+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0cDyPOxIq3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.e9pOqpGv5r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0cDyPOxIq3 ++ cat /tmp/tmp.e9pOqpGv5r ++ rm /tmp/tmp.0cDyPOxIq3 /tmp/tmp.e9pOqpGv5r ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.2lw7eE5mqB ++ mktemp + local LAST_ERR=/tmp/tmp.LNL6FfYLzr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2lw7eE5mqB + cat /tmp/tmp.LNL6FfYLzr + rm /tmp/tmp.2lw7eE5mqB /tmp/tmp.LNL6FfYLzr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find.json /tmp/tmp.AXerBqiwXt/find + check_applied_images all + local updated_image=all + case "${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5Dgi4dR2SH +++ mktemp ++ local LAST_ERR=/tmp/tmp.i9m5VI586f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5Dgi4dR2SH ++ cat /tmp/tmp.i9m5VI586f ++ rm /tmp/tmp.5Dgi4dR2SH /tmp/tmp.i9m5VI586f ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f == perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.Wi4jyXhkzz +++ mktemp ++ local LAST_ERR=/tmp/tmp.fLFtO1wbB5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wi4jyXhkzz ++ cat /tmp/tmp.fLFtO1wbB5 ++ rm /tmp/tmp.Wi4jyXhkzz /tmp/tmp.fLFtO1wbB5 ++ return 0 + [[ perconalab/percona-server-mongodb-operator:main-backup == perconalab/percona-server-mongodb-operator:main-backup ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CEAChljs6L +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Hm1KaCM5t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CEAChljs6L ++ cat /tmp/tmp.1Hm1KaCM5t ++ rm /tmp/tmp.CEAChljs6L /tmp/tmp.1Hm1KaCM5t ++ return 0 + [[ percona/pmm-client:2.44.1-1 == percona/pmm-client:2.44.1-1 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zB5EXwphlN +++ mktemp ++ local LAST_ERR=/tmp/tmp.L5QvHLwlTL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zB5EXwphlN ++ cat /tmp/tmp.L5QvHLwlTL ++ rm /tmp/tmp.zB5EXwphlN /tmp/tmp.L5QvHLwlTL ++ return 0 + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 == perconalab/percona-server-mongodb-operator:main-mongod8.0 ]] + : Cluster images have been updated correctly + compare_generation 2 statefulset upgrade-rs0 + local generation=2 + local resource=statefulset + local name=upgrade-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xrjsLy4m3F +++ mktemp ++ local LAST_ERR=/tmp/tmp.C2Ch2gCw9K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xrjsLy4m3F ++ cat /tmp/tmp.C2Ch2gCw9K ++ rm /tmp/tmp.xrjsLy4m3F /tmp/tmp.C2Ch2gCw9K ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 psmdb upgrade + local generation=2 + local resource=psmdb + local name=upgrade + local current_generation ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ycGcbhRot3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YocbMNftlY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ycGcbhRot3 ++ cat /tmp/tmp.YocbMNftlY ++ rm /tmp/tmp.ycGcbhRot3 /tmp/tmp.YocbMNftlY ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + desc 'drop collection and do restore with new version' + set +o xtrace ----------------------------------------------------------------------------------- drop collection and do restore with new version ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@upgrade-rs0.upgrade-13274 + local 'command=use myApp\n db.test.drop()' + local 
uri=myApp:myPass@upgrade-rs0.upgrade-13274 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0.upgrade-13274 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bory5yes1x +++ mktemp ++ local LAST_ERR=/tmp/tmp.avWDSbXg3O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bory5yes1x ++ cat /tmp/tmp.avWDSbXg3O ++ rm /tmp/tmp.Bory5yes1x /tmp/tmp.avWDSbXg3O ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bvD8rlez87 ++ mktemp + local LAST_ERR=/tmp/tmp.RyKvCg7IwP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bvD8rlez87 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8c270602-0a83-4f0d-a865-6f94fbc67659") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.RyKvCg7IwP + rm /tmp/tmp.bvD8rlez87 /tmp/tmp.RyKvCg7IwP + return 0 + check_backup_in_storage backup-minio minio rs0 myApp.test.gz + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=myApp.test.gz + local protocol=http + local endpoint ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.AyC9ZFPc4S +++ mktemp ++ local LAST_ERR=/tmp/tmp.aBp46li3Lr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AyC9ZFPc4S ++ cat /tmp/tmp.aBp46li3Lr ++ rm /tmp/tmp.AyC9ZFPc4S /tmp/tmp.aBp46li3Lr ++ return 0 + backup_dest=operator-testing/2026-01-21T14:25:39Z + case ${storage_type} in + endpoint=minio-service + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2026-01-21T14:25:39Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local 
LAST_OUT=/tmp/tmp.3COhKYoq1L ++ mktemp + local LAST_ERR=/tmp/tmp.ii9BlUIe4C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2026-01-21T14:25:39Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3COhKYoq1L + cat /tmp/tmp.ii9BlUIe4C + rm /tmp/tmp.3COhKYoq1L /tmp/tmp.ii9BlUIe4C + return 0 2026-01-21 14:25:42 55 myApp.test.gz + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@upgrade-rs0.upgrade-13274 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@upgrade-rs0.upgrade-13274 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0.upgrade-13274 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.70uQSMSqYI +++ mktemp ++ local LAST_ERR=/tmp/tmp.5F2mH3VxTU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.70uQSMSqYI ++ cat /tmp/tmp.5F2mH3VxTU ++ rm /tmp/tmp.70uQSMSqYI /tmp/tmp.5F2mH3VxTU ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qvMp3d5nZy ++ mktemp + local LAST_ERR=/tmp/tmp.MXmxqNpfN4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qvMp3d5nZy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local:27017,upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("272b096a-5600-4851-ad40-efb86ec4fbf8") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.MXmxqNpfN4 + rm /tmp/tmp.qvMp3d5nZy /tmp/tmp.MXmxqNpfN4 + return 0 + compare_mongo_cmd find myApp:myPass@upgrade-rs0.upgrade-13274 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@upgrade-rs0.upgrade-13274 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:28:51+0000] running db.test.find() in myApp + [[ false == 
\t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0.upgrade-13274 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0.upgrade-13274 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tI30ckZTcS +++ mktemp ++ local LAST_ERR=/tmp/tmp.04foblO0O5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tI30ckZTcS ++ cat /tmp/tmp.04foblO0O5 ++ rm /tmp/tmp.tI30ckZTcS /tmp/tmp.04foblO0O5 ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.H9bYX1GZbS ++ mktemp + local LAST_ERR=/tmp/tmp.EgoBKHnkqL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H9bYX1GZbS + cat /tmp/tmp.EgoBKHnkqL + rm /tmp/tmp.H9bYX1GZbS /tmp/tmp.EgoBKHnkqL + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find-2nd.json /tmp/tmp.AXerBqiwXt/find-2nd + run_restore backup-minio + local backup_name=backup-minio + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/conf/restore.yml + log 'running restore restore-backup-minio' + set +o xtrace [2026-01-21T14:28:54+0000] running restore restore-backup-minio + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.elMNbV39Mi ++ mktemp + local LAST_ERR=/tmp/tmp.8ssh8qdxV0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.elMNbV39Mi perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.8ssh8qdxV0 + rm /tmp/tmp.elMNbV39Mi /tmp/tmp.8ssh8qdxV0 + return 0 + wait_restore backup-minio upgrade + local backup_name=backup-minio + local cluster_name=upgrade + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be created.OK Waiting psmdb-restore/restore-backup-minio to 
reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency upgrade + local retry=0 ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vEUVEBx7xC +++ mktemp ++ local LAST_ERR=/tmp/tmp.5M4ocYNk6a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vEUVEBx7xC ++ cat /tmp/tmp.5M4ocYNk6a ++ rm /tmp/tmp.vEUVEBx7xC /tmp/tmp.5M4ocYNk6a ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gmg7dLoeWc +++ mktemp ++ local LAST_ERR=/tmp/tmp.EisnvYifqU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gmg7dLoeWc ++ cat /tmp/tmp.EisnvYifqU ++ rm /tmp/tmp.gmg7dLoeWc /tmp/tmp.EisnvYifqU ++ return 0 + [[ 3 == \3 ]] + simple_data_check upgrade-rs0 3 + local cluster_name=upgrade-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 + local command=find + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:29:09+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.viwoCZ3VcO +++ mktemp ++ local LAST_ERR=/tmp/tmp.GpcBpl8yrt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.viwoCZ3VcO ++ cat /tmp/tmp.GpcBpl8yrt ++ rm /tmp/tmp.viwoCZ3VcO /tmp/tmp.GpcBpl8yrt ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.me09sJ3Hmx ++ mktemp + local LAST_ERR=/tmp/tmp.qTst42ghMc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) 
+ set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.me09sJ3Hmx + cat /tmp/tmp.qTst42ghMc + rm /tmp/tmp.me09sJ3Hmx /tmp/tmp.qTst42ghMc + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find.json /tmp/tmp.AXerBqiwXt/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 + local command=find + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:29:12+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sp2r4f3UCA +++ mktemp ++ local LAST_ERR=/tmp/tmp.71wHcB4cTU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sp2r4f3UCA ++ cat /tmp/tmp.71wHcB4cTU ++ rm /tmp/tmp.sp2r4f3UCA /tmp/tmp.71wHcB4cTU ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ebn0YbhQoC ++ mktemp + local LAST_ERR=/tmp/tmp.AfwnKkWQJE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ebn0YbhQoC + cat /tmp/tmp.AfwnKkWQJE + rm /tmp/tmp.ebn0YbhQoC /tmp/tmp.AfwnKkWQJE + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find.json /tmp/tmp.AXerBqiwXt/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 + local command=find + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 + local postfix= + local 
suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T14:29:14+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.03fzA6N92S +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zu2iEHkRbC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.03fzA6N92S ++ cat /tmp/tmp.Zu2iEHkRbC ++ rm /tmp/tmp.03fzA6N92S /tmp/tmp.Zu2iEHkRbC ++ return 0 + local client_container=psmdb-client-696897d69b-4kvc6 + kubectl_bin exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nVe8pfe1UU ++ mktemp + local LAST_ERR=/tmp/tmp.PlwqMzhsmM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-4kvc6 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-13274.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nVe8pfe1UU + cat /tmp/tmp.PlwqMzhsmM + rm /tmp/tmp.nVe8pfe1UU /tmp/tmp.PlwqMzhsmM + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/upgrade/compare/find.json /tmp/tmp.AXerBqiwXt/find + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + destroy upgrade-13274 + local namespace=upgrade-13274 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.XcB3aGK5Gq +++ 
mktemp ++ local LAST_ERR=/tmp/tmp.sW4OyQYIRz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XcB3aGK5Gq ++ cat /tmp/tmp.sW4OyQYIRz ++ rm /tmp/tmp.XcB3aGK5Gq /tmp/tmp.sW4OyQYIRz ++ return 0 + '[' 1 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.cgrQqcY6KM ++ mktemp + local LAST_ERR=/tmp/tmp.9DNoiFPuFz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cgrQqcY6KM NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-minio upgrade minio s3://operator-testing/2026-01-21T14:25:39Z logical 47.22KB ready 3m32s 5m36s + cat /tmp/tmp.9DNoiFPuFz + rm /tmp/tmp.cgrQqcY6KM /tmp/tmp.9DNoiFPuFz + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.PY92P1mT8A ++ mktemp + local LAST_ERR=/tmp/tmp.nNTiddELsU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PY92P1mT8A perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted from upgrade-13274 namespace + cat /tmp/tmp.nNTiddELsU + rm /tmp/tmp.PY92P1mT8A /tmp/tmp.nNTiddELsU + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.P9ZVCCNmwL ++ mktemp + local LAST_ERR=/tmp/tmp.sqLJDMjjYk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P9ZVCCNmwL customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.sqLJDMjjYk + rm /tmp/tmp.P9ZVCCNmwL /tmp/tmp.sqLJDMjjYk + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.PtYgQXTL3H ++ mktemp + 
local LAST_ERR=/tmp/tmp.6Us1phMDiD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PtYgQXTL3H + cat /tmp/tmp.6Us1phMDiD + rm /tmp/tmp.PtYgQXTL3H /tmp/tmp.6Us1phMDiD + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.OpNIO5GCkF ++ mktemp + local LAST_ERR=/tmp/tmp.lsjXS1Gbos + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OpNIO5GCkF + cat /tmp/tmp.lsjXS1Gbos + rm /tmp/tmp.OpNIO5GCkF /tmp/tmp.lsjXS1Gbos + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n upgrade-13274 upgrade --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/upgrade patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.L2NjrEjvql ++ mktemp + local LAST_ERR=/tmp/tmp.TUEYAo6YGt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L2NjrEjvql customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.TUEYAo6YGt + rm /tmp/tmp.L2NjrEjvql /tmp/tmp.TUEYAo6YGt + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.sL46Vgec2C ++ mktemp + local LAST_ERR=/tmp/tmp.rOoFc992Jz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sL46Vgec2C clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.rOoFc992Jz + rm /tmp/tmp.sL46Vgec2C /tmp/tmp.rOoFc992Jz + return 0 + destroy_cert_manager + kubectl_bin delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.iyHiIA5w1F
++ mktemp
+ local LAST_ERR=/tmp/tmp.tdBcZb8jO3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.iyHiIA5w1F
+ cat /tmp/tmp.tdBcZb8jO3
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 0
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.iyHiIA5w1F
+ cat /tmp/tmp.tdBcZb8jO3
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.iyHiIA5w1F
+ cat /tmp/tmp.tdBcZb8jO3
+ sleep 8
+ cat /tmp/tmp.iyHiIA5w1F
+ cat /tmp/tmp.tdBcZb8jO3
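Every kubectl call in this suite runs through a retry wrapper: stdout and stderr are captured in mktemp files, the command is attempted up to three times with a growing sleep (0, 4, then 8 seconds), and the captured output is dumped after each attempt. In this run every resource in the cert-manager manifest is reported NotFound, so all three attempts fail and the wrapper returns 1, which the caller then ignores (the "+ true" below). The helper below is a minimal sketch of that pattern under stated assumptions, not the suite's actual kubectl_bin implementation; the name retry_kubectl and the fixed attempt/backoff values are illustrative.

#!/bin/bash
# Minimal sketch (assumption: not the suite's real kubectl_bin) of the
# retry-with-backoff wrapper whose trace appears above.
retry_kubectl() {
    local last_out last_err exit_status=0 timeout=4
    last_out=$(mktemp)
    last_err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$last_out" 2>"$last_err"
        exit_status=$?
        set -e
        cat "$last_out"
        cat "$last_err"
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))   # sleep 0, then 4, then 8 -- as in the trace
    done
    rm -f "$last_out" "$last_err"
    return "$exit_status"
}

# Usage mirroring the trace: tolerate a cert-manager release that was never
# installed, so the cleanup step stays idempotent.
retry_kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml || true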
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.iyHiIA5w1F /tmp/tmp.tdBcZb8jO3 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.AXerBqiwXt + kubectl_bin delete --grace-period=0 --force=true namespace upgrade-13274 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.XMk0YsvHN9 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.7ZXWdg3cr7 ++ mktemp + local LAST_ERR=/tmp/tmp.kuUrOx8Elf + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.0u4lpIwHQ3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace upgrade-13274 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
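The tail of the log is the suite teardown: the temporary test namespace (upgrade-13274) and the operator namespace (psmdb-operator) are force-deleted in parallel, which is why their xtrace output is interleaved. Below is a minimal sketch of that step under stated assumptions; the destroy_namespaces helper name and the --ignore-not-found flag are illustrative rather than the suite's exact code, while the namespace names come from this run.

#!/bin/bash
# Minimal sketch of the parallel forced-namespace teardown traced above.
destroy_namespaces() {
    local ns
    for ns in "$@"; do
        # --grace-period=0 --force skips graceful termination so a stuck
        # namespace cannot hang the job; --ignore-not-found keeps the step
        # idempotent. Both deletions run in the background, then we wait.
        kubectl delete namespace "$ns" \
            --grace-period=0 --force=true --ignore-not-found=true &
    done
    wait
}

destroy_namespaces upgrade-13274 psmdb-operator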