Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/upgrade.log grep: warning: stray \ before - WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + cluster=upgrade + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.21.0 + TARGET_IMAGE=perconalab/percona-server-mongodb-operator:PR-1961-f6beb261 + TARGET_IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod7.0 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_BACKUP=perconalab/percona-server-mongodb-operator:main-backup ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ local 'cli=mongod --version' +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ local pod_name=22043 +++ kubectl_bin -n default run 22043 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cOj52tC8kb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rxxkZWIRlZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 22043 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cOj52tC8kb +++ cat /tmp/tmp.rxxkZWIRlZ +++ rm /tmp/tmp.cOj52tC8kb /tmp/tmp.rxxkZWIRlZ +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/22043 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ys77b6ZUll ++++ mktemp +++ local LAST_ERR=/tmp/tmp.V4XEw91Ezx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/22043 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Ys77b6ZUll +++ cat /tmp/tmp.V4XEw91Ezx +++ rm /tmp/tmp.Ys77b6ZUll /tmp/tmp.V4XEw91Ezx +++ return 0 ++++ kubectl_bin -n default exec 22043 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LzKSigOWAm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.W9Lc0gM6Cr ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 22043 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.LzKSigOWAm ++++ cat /tmp/tmp.W9Lc0gM6Cr ++++ rm /tmp/tmp.LzKSigOWAm /tmp/tmp.W9Lc0gM6Cr ++++ return 0 +++ local 'output=db version v7.0.22-12 Build Info: { "version": "7.0.22-12", "gitVersion": "dba13d69ddf3a3457b6e155d33709fd8bbf41a96", "openSSLVersion": "OpenSSL 3.2.2 4 Jun 2024", "modules": [], "proFeatures": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/22043 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TLXPDRNhH4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.foMcMJ7uzz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for 
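Nearly every command in this log goes through the harness's kubectl_bin wrapper, which is why the mktemp / LAST_OUT / LAST_ERR / seq 0 2 boilerplate repeats around each kubectl call, and the block above uses that wrapper to probe which mongod version ships in the TARGET mongod image. Below is a minimal stand-alone sketch of both ideas, assuming illustrative helper names (retry_kubectl, probe_mongod_version) that are not the harness's real functions:

#!/usr/bin/env bash
set -euo pipefail

# Retry a kubectl invocation up to three times, keeping the last stdout/stderr in
# temp files (the same LAST_OUT/LAST_ERR pattern seen throughout the log).
retry_kubectl() {
    local out err rc=1
    out=$(mktemp) err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"
        rc=$?
        set -e
        [ "$rc" -eq 0 ] && break
        sleep 4
    done
    cat "$out"
    cat "$err" >&2
    rm -f "$out" "$err"
    return "$rc"
}

# Probe the mongod version baked into an image: start a throwaway pod, run
# `mongod --version` inside it, keep only the "major.minor.patch-build" token.
probe_mongod_version() {
    local image=$1 pod="mongod-ver-$$"
    retry_kubectl -n default run "$pod" --image="$image" --restart=Never --command -- sleep infinity
    retry_kubectl -n default wait --for=condition=Ready "pod/$pod"
    retry_kubectl -n default exec "$pod" -- mongod --version |
        sed -nr 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/p'
    retry_kubectl -n default delete "pod/$pod" --grace-period=0 --force
}

probe_mongod_version perconalab/percona-server-mongodb-operator:main-mongod7.0   # prints e.g. 7.0.22-12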
i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/22043 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TLXPDRNhH4 +++ cat /tmp/tmp.foMcMJ7uzz Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.TLXPDRNhH4 /tmp/tmp.foMcMJ7uzz +++ return 0 +++ echo db version v7.0.22-12 Build Info: '{' '"version":' '"7.0.22-12",' '"gitVersion":' '"dba13d69ddf3a3457b6e155d33709fd8bbf41a96",' '"openSSLVersion":' '"OpenSSL' 3.2.2 4 Jun '2024",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=7.0.22-12 ++ [[ ! 7.0.22-12 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 7.0.22-12 + FULL_VER=7.0.22-12 + MONGO_VER=7.0 ++ tail -n1 ++ curl -s https://check.percona.com/versions/v1/psmdb-operator ++ jq -r '.versions[].operator' ++ sort -V + INIT_OPERATOR_VER=1.20.1 + [[ 1.20.1 == \1\.\2\1\.\0 ]] + GIT_TAG=v1.20.1 + case $(curl -s -o /dev/null -w "%{http_code}" 'https://check.percona.com/versions/v1/psmdb-operator/'${INIT_OPERATOR_VER}'/latest?databaseVersion='${MONGO_VER}'') in ++ curl -s -o /dev/null -w '%{http_code}' 'https://check.percona.com/versions/v1/psmdb-operator/1.20.1/latest?databaseVersion=7.0' ++ curl -s 'https://check.percona.com/versions/v1/psmdb-operator/1.20.1/latest?databaseVersion=7.0' + INIT_OPERATOR_IMAGES='{"versions":[{"product":"psmdb-operator","operator":"1.20.1","matrix":{"mongod":{"7.0.18-11":{"imagePath":"percona/percona-server-mongodb:7.0.18-11","imageHash":"0115a72f5e60d86cb4f4b7eae32118c0910e8c96831e013de12798a1771c4c91","imageHashArm64":"86c17067f3e233f522612389ed2500231cbb22ce93524c476b9aa8d464d06f0b","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"2f9c79934b85541afe815a1e1e58a03c66bf953abea3932ddd8486408b867c41","imageHashArm64":"","status":"recommended","critical":true}},"proxysql":{},"haproxy":{},"backup":{"2.9.1":{"imagePath":"percona/percona-backup-mongodb:2.9.1","imageHash":"976bfbaa548eb70dd90bf0bd2dcfe40b2994d749ef644af3a0590f4856e4d7e2","imageHashArm64":"ebc6e5c5aa3ed97991d3fd90e9201597b485ddc0eae8d7ee4311ecb785c03bf0","status":"recommended","critical":false}},"operator":{"1.20.1":{"imagePath":"percona/percona-server-mongodb-operator:1.20.1","imageHash":"b559cdd349916d806f6b13b4ac43fdbae982298fad2088b649631a356020ee46","imageHashArm64":"5a66e497dd1650e5a1123659292fe4c615e0ab5ce7e5d8437bf2101f91b625e1","status":"recommended","critical":false}},"logCollector":{},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + OPERATOR_NAME=percona-server-mongodb-operator ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.20.1","matrix":{"mongod":{"7.0.18-11":{"imagePath":"percona/percona-server-mongodb:7.0.18-11","imageHash":"0115a72f5e60d86cb4f4b7eae32118c0910e8c96831e013de12798a1771c4c91","imageHashArm64":"86c17067f3e233f522612389ed2500231cbb22ce93524c476b9aa8d464d06f0b","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"2f9c79934b85541afe815a1e1e58a03c66bf953abea3932ddd8486408b867c41","imageHashArm64":"","status":"recommended","critical":true}},"proxysql":{},"haproxy":{},"backup":{"2.9.1":{"imagePath":"percona/percona-backup-mongodb:2.9.1","imageHash":"976bfbaa548eb70dd90bf0bd2dcfe40b2994d749ef644af3a0590f4856e4d7e2","imageHashArm64":"ebc6e5c5aa3ed97991d3fd90e9201597b485ddc0eae8d7ee4311ecb785c03bf0","status":"recommended","critical":false}},"operator":{"1.20.1":{"imagePath":"percona/percona-server-mongodb-operator:1.20.1","imageHash":"b559cdd349916d806f6b13b4ac43fdbae982298fad2088b649631a356020ee46","imageHashArm64":"5a66e497dd1650e5a1123659292fe4c615e0ab5ce7e5d8437bf2101f91b625e1","status":"recommended","critical":false}},"logCollector":{},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.operator[].imagePath' + IMAGE=percona/percona-server-mongodb-operator:1.20.1 ++ echo perconalab/percona-server-mongodb-operator:PR-1961-f6beb261 ++ cut -d/ -f1 + [[ perconalab == \p\e\r\c\o\n\a\l\a\b ]] + IMAGE=perconalab/percona-server-mongodb-operator:1.20.1 ++ echo '{"versions":[{"product":"psmdb-operator","operator":"1.20.1","matrix":{"mongod":{"7.0.18-11":{"imagePath":"percona/percona-server-mongodb:7.0.18-11","imageHash":"0115a72f5e60d86cb4f4b7eae32118c0910e8c96831e013de12798a1771c4c91","imageHashArm64":"86c17067f3e233f522612389ed2500231cbb22ce93524c476b9aa8d464d06f0b","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"2f9c79934b85541afe815a1e1e58a03c66bf953abea3932ddd8486408b867c41","imageHashArm64":"","status":"recommended","critical":true}},"proxysql":{},"haproxy":{},"backup":{"2.9.1":{"imagePath":"percona/percona-backup-mongodb:2.9.1","imageHash":"976bfbaa548eb70dd90bf0bd2dcfe40b2994d749ef644af3a0590f4856e4d7e2","imageHashArm64":"ebc6e5c5aa3ed97991d3fd90e9201597b485ddc0eae8d7ee4311ecb785c03bf0","status":"recommended","critical":false}},"operator":{"1.20.1":{"imagePath":"percona/percona-server-mongodb-operator:1.20.1","imageHash":"b559cdd349916d806f6b13b4ac43fdbae982298fad2088b649631a356020ee46","imageHashArm64":"5a66e497dd1650e5a1123659292fe4c615e0ab5ce7e5d8437bf2101f91b625e1","status":"recommended","critical":false}},"logCollector":{},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.mongod[].imagePath' + IMAGE_MONGOD=percona/percona-server-mongodb:7.0.18-11 ++ jq -r '.versions[].matrix.pmm[].imagePath' ++ echo 
'{"versions":[{"product":"psmdb-operator","operator":"1.20.1","matrix":{"mongod":{"7.0.18-11":{"imagePath":"percona/percona-server-mongodb:7.0.18-11","imageHash":"0115a72f5e60d86cb4f4b7eae32118c0910e8c96831e013de12798a1771c4c91","imageHashArm64":"86c17067f3e233f522612389ed2500231cbb22ce93524c476b9aa8d464d06f0b","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"2f9c79934b85541afe815a1e1e58a03c66bf953abea3932ddd8486408b867c41","imageHashArm64":"","status":"recommended","critical":true}},"proxysql":{},"haproxy":{},"backup":{"2.9.1":{"imagePath":"percona/percona-backup-mongodb:2.9.1","imageHash":"976bfbaa548eb70dd90bf0bd2dcfe40b2994d749ef644af3a0590f4856e4d7e2","imageHashArm64":"ebc6e5c5aa3ed97991d3fd90e9201597b485ddc0eae8d7ee4311ecb785c03bf0","status":"recommended","critical":false}},"operator":{"1.20.1":{"imagePath":"percona/percona-server-mongodb-operator:1.20.1","imageHash":"b559cdd349916d806f6b13b4ac43fdbae982298fad2088b649631a356020ee46","imageHashArm64":"5a66e497dd1650e5a1123659292fe4c615e0ab5ce7e5d8437bf2101f91b625e1","status":"recommended","critical":false}},"logCollector":{},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' + IMAGE_PMM_CLIENT=percona/pmm-client:2.44.1-1 ++ echo '{"versions":[{"product":"psmdb-operator","operator":"1.20.1","matrix":{"mongod":{"7.0.18-11":{"imagePath":"percona/percona-server-mongodb:7.0.18-11","imageHash":"0115a72f5e60d86cb4f4b7eae32118c0910e8c96831e013de12798a1771c4c91","imageHashArm64":"86c17067f3e233f522612389ed2500231cbb22ce93524c476b9aa8d464d06f0b","status":"recommended","critical":false}},"pxc":{},"pmm":{"2.44.1-1":{"imagePath":"percona/pmm-client:2.44.1-1","imageHash":"2f9c79934b85541afe815a1e1e58a03c66bf953abea3932ddd8486408b867c41","imageHashArm64":"","status":"recommended","critical":true}},"proxysql":{},"haproxy":{},"backup":{"2.9.1":{"imagePath":"percona/percona-backup-mongodb:2.9.1","imageHash":"976bfbaa548eb70dd90bf0bd2dcfe40b2994d749ef644af3a0590f4856e4d7e2","imageHashArm64":"ebc6e5c5aa3ed97991d3fd90e9201597b485ddc0eae8d7ee4311ecb785c03bf0","status":"recommended","critical":false}},"operator":{"1.20.1":{"imagePath":"percona/percona-server-mongodb-operator:1.20.1","imageHash":"b559cdd349916d806f6b13b4ac43fdbae982298fad2088b649631a356020ee46","imageHashArm64":"5a66e497dd1650e5a1123659292fe4c615e0ab5ce7e5d8437bf2101f91b625e1","status":"recommended","critical":false}},"logCollector":{},"postgresql":{},"pgbackrest":{},"pgbackrestRepo":{},"pgbadger":{},"pgbouncer":{},"pxcOperator":{},"psmdbOperator":{},"pgOperatorApiserver":{},"pgOperatorEvent":{},"pgOperatorRmdata":{},"pgOperatorScheduler":{},"pgOperator":{},"pgOperatorDeployer":{},"psOperator":{},"mysql":{},"router":{},"orchestrator":{},"toolkit":{},"postgis":{}}}]}' ++ jq -r '.versions[].matrix.backup[].imagePath' + IMAGE_BACKUP=percona/percona-backup-mongodb:2.9.1 + [[ 1.21.0 == \1\.\2\0\.\1 ]] + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra_gh upgrade-18945 v1.20.1 + local ns=upgrade-18945 + local git_tag=v1.20.1 + check_crd_for_deletion v1.20.1 + local git_tag=v1.20.1 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/crd.yaml ++ yq eval .metadata.name ++ 
/usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uuZUxd8uT1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ctHcF2k1Fp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uuZUxd8uT1 ++ cat /tmp/tmp.ctHcF2k1Fp ++ rm /tmp/tmp.uuZUxd8uT1 /tmp/tmp.ctHcF2k1Fp ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.001wVmu4X7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mdQMWQVkrP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.001wVmu4X7 ++ cat /tmp/tmp.mdQMWQVkrP ++ rm /tmp/tmp.001wVmu4X7 /tmp/tmp.mdQMWQVkrP ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fCPbsG9KQw +++ mktemp ++ local LAST_ERR=/tmp/tmp.FyYmYW0N8O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fCPbsG9KQw ++ cat /tmp/tmp.FyYmYW0N8O ++ rm /tmp/tmp.fCPbsG9KQw /tmp/tmp.FyYmYW0N8O ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get 
ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.JQweZBxnhK + local LAST_OUT=/tmp/tmp.HnuUmR0Z9I egrep: warning: egrep is obsolescent; using grep -E ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.P7uYv4bskD + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.fNCqaKPyZT + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JQweZBxnhK + cat /tmp/tmp.P7uYv4bskD + rm /tmp/tmp.JQweZBxnhK /tmp/tmp.P7uYv4bskD + return 0 namespace "upgrade-5080" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HnuUmR0Z9I namespace "psmdb-operator" deleted + cat /tmp/tmp.fNCqaKPyZT + rm /tmp/tmp.HnuUmR0Z9I /tmp/tmp.fNCqaKPyZT + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fNppSOWcsA ++ mktemp + local LAST_ERR=/tmp/tmp.ixGEeL7r5C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fNppSOWcsA + cat /tmp/tmp.ixGEeL7r5C + rm /tmp/tmp.fNppSOWcsA /tmp/tmp.ixGEeL7r5C + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.UViLqrk1rU ++ mktemp + local LAST_ERR=/tmp/tmp.4j29uzOwNi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UViLqrk1rU namespace/psmdb-operator created + cat /tmp/tmp.4j29uzOwNi + rm 
/tmp/tmp.UViLqrk1rU /tmp/tmp.4j29uzOwNi + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.XvF646Hv4N +++ mktemp ++ local LAST_ERR=/tmp/tmp.bGO7KP8Hjr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XvF646Hv4N ++ cat /tmp/tmp.bGO7KP8Hjr ++ rm /tmp/tmp.XvF646Hv4N /tmp/tmp.bGO7KP8Hjr ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fc6TiX19X9 ++ mktemp + local LAST_ERR=/tmp/tmp.GpZ99fllz7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fc6TiX19X9 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster2" modified. + cat /tmp/tmp.GpZ99fllz7 + rm /tmp/tmp.fc6TiX19X9 /tmp/tmp.GpZ99fllz7 + return 0 + deploy_operator_gh v1.20.1 + local git_tag=v1.20.1 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/crd.yaml --server-side ++ mktemp + local LAST_OUT=/tmp/tmp.SxPaNJGwmA ++ mktemp + local LAST_ERR=/tmp/tmp.fq9tam6O8h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/crd.yaml --server-side + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SxPaNJGwmA customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.fq9tam6O8h + rm /tmp/tmp.SxPaNJGwmA /tmp/tmp.fq9tam6O8h + return 0 + local rbac_yaml=rbac + local operator_yaml=operator + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.nopsJCEaZO ++ mktemp + local LAST_ERR=/tmp/tmp.Nupu9ZEFUQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cw-rbac.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nopsJCEaZO clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.Nupu9ZEFUQ + rm /tmp/tmp.nopsJCEaZO /tmp/tmp.Nupu9ZEFUQ + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cw-operator.yaml + /usr/sbin/sed -i -e 
's^image: .*^image: perconalab/percona-server-mongodb-operator:1.20.1^' /tmp/tmp.DvEhBb5T17/cw-operator_v1.20.1.yaml + kubectl_bin apply -f /tmp/tmp.DvEhBb5T17/cw-operator_v1.20.1.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.lP3OpS9bqK ++ mktemp + local LAST_ERR=/tmp/tmp.PJOLdPqrRe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /tmp/tmp.DvEhBb5T17/cw-operator_v1.20.1.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lP3OpS9bqK deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.PJOLdPqrRe + rm /tmp/tmp.lP3OpS9bqK /tmp/tmp.PJOLdPqrRe + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1cFkVzJuh7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.b7IvZJXf3W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1cFkVzJuh7 ++ cat /tmp/tmp.b7IvZJXf3W ++ rm /tmp/tmp.1cFkVzJuh7 /tmp/tmp.b7IvZJXf3W ++ return 0 + wait_pod percona-server-mongodb-operator-78d4cb8b56-b8v2j + local pod=percona-server-mongodb-operator-78d4cb8b56-b8v2j + set +o xtrace waiting for pod/percona-server-mongodb-operator-78d4cb8b56-b8v2j to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.J8ND10OgGe +++ mktemp ++ local LAST_ERR=/tmp/tmp.OXnscWIbUQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J8ND10OgGe ++ cat /tmp/tmp.OXnscWIbUQ ++ rm /tmp/tmp.J8ND10OgGe /tmp/tmp.OXnscWIbUQ ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-78d4cb8b56-b8v2j ++ mktemp + local LAST_OUT=/tmp/tmp.fajbxht25s ++ mktemp + local LAST_ERR=/tmp/tmp.4tIvf7hf1U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-78d4cb8b56-b8v2j + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fajbxht25s + cat /tmp/tmp.4tIvf7hf1U + rm /tmp/tmp.fajbxht25s /tmp/tmp.4tIvf7hf1U + return 0 2025-08-14T11:37:43.275Z INFO setup Manager starting up {"gitCommit": "30d9ec941baf57619c8973249a3c5d3fd5cc08f4", "gitBranch": "release-1-20-1", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"} + create_namespace upgrade-18945 + local namespace=upgrade-18945 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 
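This is the baseline install the upgrade starts from: the released v1.20.1 manifests are applied straight from the GitHub tag, with only the operator image rewritten to the perconalab mirror, and the kube context namespace has already been switched to psmdb-operator. A summary sketch (the trace downloads cw-operator.yaml to a temp file and edits it with sed -i, and waits on the operator pod by name rather than with kubectl wait):

tag=v1.20.1
base=https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/$tag/deploy

kubectl apply --server-side -f "$base/crd.yaml"     # v1.20.1 CRDs
kubectl apply -f "$base/cw-rbac.yaml"               # cluster-wide RBAC
curl -s "$base/cw-operator.yaml" |
    sed -e 's^image: .*^image: perconalab/percona-server-mongodb-operator:1.20.1^' |
    kubectl apply -f -                              # operator deployment with swapped image

kubectl -n psmdb-operator wait --for=condition=Ready pod \
    --selector=name=percona-server-mongodb-operator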
30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' ++ mktemp + xargs kubectl delete ns + desc 'cleaned up old namespaces upgrade-18945' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-18945 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-18945 --ignore-not-found egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.oO2Abffqw0 ++ mktemp + local LAST_OUT=/tmp/tmp.zVcdJH1Jsa + local LAST_ERR=/tmp/tmp.bbzqPL2sPI + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.IyWftgYNlA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace upgrade-18945 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oO2Abffqw0 + cat /tmp/tmp.bbzqPL2sPI + rm /tmp/tmp.oO2Abffqw0 /tmp/tmp.bbzqPL2sPI + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zVcdJH1Jsa + cat /tmp/tmp.IyWftgYNlA + rm /tmp/tmp.zVcdJH1Jsa /tmp/tmp.IyWftgYNlA + return 0 + kubectl_bin wait --for=delete namespace upgrade-18945 ++ mktemp + local LAST_OUT=/tmp/tmp.gFh7obM0Qb ++ mktemp + local LAST_ERR=/tmp/tmp.ye7Nl6VIcv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace upgrade-18945 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gFh7obM0Qb + cat /tmp/tmp.ye7Nl6VIcv + rm /tmp/tmp.gFh7obM0Qb /tmp/tmp.ye7Nl6VIcv + return 0 + desc 'create namespace upgrade-18945' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-18945 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-18945 ++ mktemp + local LAST_OUT=/tmp/tmp.ExljhIXKGK ++ mktemp + local 
LAST_ERR=/tmp/tmp.WEtWk90FG0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace upgrade-18945 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ExljhIXKGK namespace/upgrade-18945 created + cat /tmp/tmp.WEtWk90FG0 + rm /tmp/tmp.ExljhIXKGK /tmp/tmp.WEtWk90FG0 + return 0 + set_kube_ctx upgrade-18945 + local namespace=upgrade-18945 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Lele84KFxX +++ mktemp ++ local LAST_ERR=/tmp/tmp.X7Yzc5xe3l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Lele84KFxX ++ cat /tmp/tmp.X7Yzc5xe3l ++ rm /tmp/tmp.Lele84KFxX /tmp/tmp.X7Yzc5xe3l ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster2 --namespace=upgrade-18945 ++ mktemp + local LAST_OUT=/tmp/tmp.PX0D8JAy0Y ++ mktemp + local LAST_ERR=/tmp/tmp.NgnZ3phDcs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster2 --namespace=upgrade-18945 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PX0D8JAy0Y Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster2" modified. + cat /tmp/tmp.NgnZ3phDcs + rm /tmp/tmp.PX0D8JAy0Y /tmp/tmp.NgnZ3phDcs + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4Ju2Yy1LdI ++ mktemp + local LAST_ERR=/tmp/tmp.cwRhueFhrA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4Ju2Yy1LdI secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.cwRhueFhrA + rm /tmp/tmp.4Ju2Yy1LdI /tmp/tmp.cwRhueFhrA + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set 
configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Aug 14 11:38:05 2025 NAMESPACE: upgrade-18945 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-18945.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-18945 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-18945 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-18945 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-18945 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4nJxeKwo87 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wH87LwIePs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4nJxeKwo87 ++ cat /tmp/tmp.wH87LwIePs ++ rm /tmp/tmp.4nJxeKwo87 /tmp/tmp.wH87LwIePs ++ return 0 + MINIO_POD=minio-service-86dfccd949-dls7l + wait_pod minio-service-86dfccd949-dls7l + local pod=minio-service-86dfccd949-dls7l + set +o xtrace waiting for pod/minio-service-86dfccd949-dls7l to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-18945.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.S608oso3uK ++ mktemp + local LAST_ERR=/tmp/tmp.KDjda2m2ZI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-18945.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S608oso3uK service/minio-service created + cat /tmp/tmp.KDjda2m2ZI + rm /tmp/tmp.S608oso3uK /tmp/tmp.KDjda2m2ZI + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local 
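Backups for this test land in an in-cluster MinIO: the chart is installed as a single standalone replica with static credentials, and the operator-testing bucket is created from a one-shot awscli pod pointed at the MinIO service endpoint. An outline of those two steps with the values copied from the trace (the harness additionally retries the helm install and exposes the service into the psmdb-operator namespace through an ExternalName service):

helm repo add minio https://charts.min.io/
helm install minio-service minio/minio --version 5.4.0 \
    --set replicas=1 --set mode=standalone \
    --set rootUser=rootuser --set rootPassword=rootpass123 \
    --set 'users[0].accessKey=some-access-key' \
    --set 'users[0].secretKey=some-secret-key' \
    --set 'users[0].policy=consoleAdmin' \
    --set service.type=ClusterIP --set persistence.size=2G

# create the bucket the backups will be written to
kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c \
    'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
     AWS_DEFAULT_REGION=us-east-1 aws --endpoint-url http://minio-service:9000 \
     s3 mb s3://operator-testing'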
LAST_OUT=/tmp/tmp.9hf6Sc2xT3 ++ mktemp + local LAST_ERR=/tmp/tmp.TQg9gfPdsF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9hf6Sc2xT3 make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.TQg9gfPdsF If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_upgrade-18945 + rm /tmp/tmp.9hf6Sc2xT3 /tmp/tmp.TQg9gfPdsF + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/secrets.yaml + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /tmp/tmp.DvEhBb5T17/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.KqTpc9IuGN ++ mktemp + local LAST_ERR=/tmp/tmp.c7vPs7TVKh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml -f /tmp/tmp.DvEhBb5T17/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KqTpc9IuGN deployment.apps/psmdb-client created secret/my-cluster-name-secrets created + cat /tmp/tmp.c7vPs7TVKh + rm /tmp/tmp.KqTpc9IuGN /tmp/tmp.c7vPs7TVKh + return 0 + local cr_yaml=/tmp/tmp.DvEhBb5T17/cr_v1.20.1.yaml + prepare_cr_yaml /tmp/tmp.DvEhBb5T17/cr_v1.20.1.yaml + local cr_yaml=/tmp/tmp.DvEhBb5T17/cr_v1.20.1.yaml + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cr.yaml + yq eval ' .metadata.name = "upgrade" | .spec.upgradeOptions.apply = "disabled" | .spec.replsets[].size = 3 | .spec.replsets[].arbiter.enabled = false | .spec.backup.enabled = true | .spec.backup.pitr.enabled = false | .spec.backup.storages.minio.type = "s3" | .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" | .spec.backup.storages.minio.s3.region = "us-east-1" | .spec.backup.storages.minio.s3.bucket = "operator-testing" | .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service:9000/" | .spec.sharding.enabled = false | .spec.image="" | .spec.image tag="!!null" | .spec.backup.image = "-backup" | .spec.pmm.image = "-pmm"' + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + apply_cluster /tmp/tmp.DvEhBb5T17/cr_v1.20.1.yaml + '[' -z '' ']' + cat_config /tmp/tmp.DvEhBb5T17/cr_v1.20.1.yaml + kubectl_bin apply -f - + cat /tmp/tmp.DvEhBb5T17/cr_v1.20.1.yaml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "percona/percona-server-mongodb:7.0.18-11"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = 
"perconalab/percona-server-mongodb-operator:1.20.1"' + yq eval '(.spec | select(has("backup"))).backup.image = "percona/percona-backup-mongodb:2.9.1"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.41syoAxvVI ++ mktemp + local LAST_ERR=/tmp/tmp.xtr4BPLISy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.41syoAxvVI perconaservermongodb.psmdb.percona.com/upgrade created + cat /tmp/tmp.xtr4BPLISy + rm /tmp/tmp.41syoAxvVI /tmp/tmp.xtr4BPLISy + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running upgrade-rs0 3 + local name=upgrade-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-rs0-0 + local pod=upgrade-rs0-0 + set +o xtrace waiting for pod/upgrade-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-rs0-1 + local pod=upgrade-rs0-1 + set +o xtrace waiting for pod/upgrade-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PrwRdZD7oY +++ mktemp ++ local LAST_ERR=/tmp/tmp.kJWMtV9CwQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PrwRdZD7oY ++ cat /tmp/tmp.kJWMtV9CwQ ++ rm /tmp/tmp.PrwRdZD7oY /tmp/tmp.kJWMtV9CwQ ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-rs0-2 + local pod=upgrade-rs0-2 + set +o xtrace waiting for pod/upgrade-rs0-2 to be ready........OK ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RlFM7j86hs +++ mktemp ++ local LAST_ERR=/tmp/tmp.aSf2D9V7Mk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RlFM7j86hs ++ cat /tmp/tmp.aSf2D9V7Mk ++ rm /tmp/tmp.RlFM7j86hs /tmp/tmp.aSf2D9V7Mk ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TPbrtNz1tz +++ mktemp ++ local LAST_ERR=/tmp/tmp.2dv3Y9VT6I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TPbrtNz1tz ++ cat /tmp/tmp.2dv3Y9VT6I ++ rm /tmp/tmp.TPbrtNz1tz /tmp/tmp.2dv3Y9VT6I ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.. 
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@upgrade-rs0.upgrade-18945 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@upgrade-rs0.upgrade-18945 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JnDP3uvjB7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.o6XqBevus9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JnDP3uvjB7 ++ cat /tmp/tmp.o6XqBevus9 ++ rm /tmp/tmp.JnDP3uvjB7 /tmp/tmp.o6XqBevus9 ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ userAdmin:userAdmin123456@upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PCJcOPrwib ++ mktemp + local LAST_ERR=/tmp/tmp.YyRH2dFAan + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PCJcOPrwib Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("5e4da8fa-5211-4046-bf4f-a88de914dee6") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.YyRH2dFAan + rm /tmp/tmp.PCJcOPrwib /tmp/tmp.YyRH2dFAan + return 0 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@upgrade-rs0.upgrade-18945 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@upgrade-rs0.upgrade-18945 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1smPQJf31f +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.bpXh2OwJZf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1smPQJf31f ++ cat /tmp/tmp.bpXh2OwJZf ++ rm /tmp/tmp.1smPQJf31f /tmp/tmp.bpXh2OwJZf ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.f98nuFOKVV ++ mktemp + local LAST_ERR=/tmp/tmp.3SvVJenWSs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f98nuFOKVV Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9c3c9f65-9f2f-49f2-b86d-48d54f4ebdae") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.3SvVJenWSs + rm /tmp/tmp.f98nuFOKVV /tmp/tmp.3SvVJenWSs + return 0 + desc 'check if cr and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if cr and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_generation 1 statefulset upgrade-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XxTXjsGkC2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PlyHuyCXre ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XxTXjsGkC2 ++ cat /tmp/tmp.PlyHuyCXre ++ rm /tmp/tmp.XxTXjsGkC2 /tmp/tmp.PlyHuyCXre ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade + local generation=1 + local resource=psmdb + local name=upgrade + local current_generation ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.igE4NQWBGC +++ mktemp ++ local LAST_ERR=/tmp/tmp.QIXlKJ9aeU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.igE4NQWBGC ++ cat /tmp/tmp.QIXlKJ9aeU ++ rm /tmp/tmp.igE4NQWBGC /tmp/tmp.QIXlKJ9aeU ++ 
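Both the createUser call and the smoke-test write go through the psmdb-client deployment rather than a local shell: the harness resolves the client pod by label and pipes a mongo shell snippet into it over an SRV connection string built from the replica set service. A bare-bones equivalent of the write above (namespace, credentials and replica set name taken from the trace):

client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')

kubectl exec "$client" -- bash -c \
    'printf "use myApp\n db.test.insert({ x: 100500 })\n" |
     mongo "mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'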
return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'create backup ' + set +o xtrace ----------------------------------------------------------------------------------- create backup ----------------------------------------------------------------------------------- + backup_name_minio=backup-minio + run_backup minio + local storage=minio + local backup_name=backup-minio + local type=logical + desc 'run backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.U2b9I61lp1 ++ mktemp + local LAST_ERR=/tmp/tmp.3WylEgSgq1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U2b9I61lp1 perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.3WylEgSgq1 + rm /tmp/tmp.U2b9I61lp1 /tmp/tmp.3WylEgSgq1 + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state...... + desc 'upgrade operator' + set +o xtrace ----------------------------------------------------------------------------------- upgrade operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.cIiOxkhHVJ ++ mktemp + local LAST_ERR=/tmp/tmp.OFbxXniVAg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cIiOxkhHVJ customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.OFbxXniVAg + rm /tmp/tmp.cIiOxkhHVJ /tmp/tmp.OFbxXniVAg + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.c7ppVa8e2Y ++ mktemp + local LAST_ERR=/tmp/tmp.K02Vc4JaVn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c7ppVa8e2Y clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator unchanged clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.K02Vc4JaVn + rm /tmp/tmp.c7ppVa8e2Y /tmp/tmp.K02Vc4JaVn + return 0 + kubectl_bin patch deployment percona-server-mongodb-operator 
'-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-1961-f6beb261"}]}}}}' -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.lbmU0CstuM ++ mktemp + local LAST_ERR=/tmp/tmp.CtDmF8U6BQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch deployment percona-server-mongodb-operator '-p{"spec":{"template":{"spec":{"containers":[{"name":"percona-server-mongodb-operator","image":"perconalab/percona-server-mongodb-operator:PR-1961-f6beb261"}]}}}}' -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lbmU0CstuM deployment.apps/percona-server-mongodb-operator patched + cat /tmp/tmp.CtDmF8U6BQ + rm /tmp/tmp.lbmU0CstuM /tmp/tmp.CtDmF8U6BQ + return 0 + kubectl_bin rollout status deployment/percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qTElusnBdB ++ mktemp + local LAST_ERR=/tmp/tmp.zkC2WM7YXa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl rollout status deployment/percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qTElusnBdB Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "percona-server-mongodb-operator" rollout to finish: 1 old replicas are pending termination... deployment "percona-server-mongodb-operator" successfully rolled out + cat /tmp/tmp.zkC2WM7YXa + rm /tmp/tmp.qTElusnBdB /tmp/tmp.zkC2WM7YXa + return 0 + desc 'wait for operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- wait for operator upgrade ----------------------------------------------------------------------------------- ++ kubectl_bin get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ grep -vc NAME ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lC9M3Fsess +++ mktemp ++ local LAST_ERR=/tmp/tmp.bgtBgmD6me ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'custom-columns=NAME:.metadata.name,IMAGE:.spec.containers[0].image' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lC9M3Fsess ++ cat /tmp/tmp.bgtBgmD6me ++ rm /tmp/tmp.lC9M3Fsess /tmp/tmp.bgtBgmD6me ++ return 0 + [[ 1 -eq 1 ]] + sleep 10 + desc 'check images and generation after operator upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check images and generation after operator upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-rs0 3 + local name=upgrade-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-rs0-0 + local pod=upgrade-rs0-0 + set +o xtrace waiting for pod/upgrade-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-rs0-1 + local pod=upgrade-rs0-1 + set +o xtrace waiting for pod/upgrade-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OAyNj60rgd +++ mktemp ++ local LAST_ERR=/tmp/tmp.CuIjhM5Vaz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OAyNj60rgd ++ cat /tmp/tmp.CuIjhM5Vaz ++ rm /tmp/tmp.OAyNj60rgd /tmp/tmp.CuIjhM5Vaz ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-rs0-2 + local pod=upgrade-rs0-2 + set +o xtrace waiting for pod/upgrade-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fB49YSe8ag +++ mktemp ++ local LAST_ERR=/tmp/tmp.6KLfMJEkU0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fB49YSe8ag ++ cat /tmp/tmp.6KLfMJEkU0 ++ rm /tmp/tmp.fB49YSe8ag /tmp/tmp.6KLfMJEkU0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lzLZn8hZ2s +++ mktemp ++ local LAST_ERR=/tmp/tmp.0c7oKfIMqU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lzLZn8hZ2s ++ cat /tmp/tmp.0c7oKfIMqU ++ rm /tmp/tmp.lzLZn8hZ2s /tmp/tmp.0c7oKfIMqU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + check_applied_images operator + local updated_image=operator + case "${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kDzumbMNh6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vtxoZGv0fG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kDzumbMNh6 ++ cat /tmp/tmp.vtxoZGv0fG ++ rm /tmp/tmp.kDzumbMNh6 /tmp/tmp.vtxoZGv0fG ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-1961-f6beb261 == perconalab/percona-server-mongodb-operator:PR-1961-f6beb261 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iTSzJPrZgA +++ mktemp ++ local LAST_ERR=/tmp/tmp.l5xL2Stpc3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iTSzJPrZgA ++ cat /tmp/tmp.l5xL2Stpc3 ++ rm /tmp/tmp.iTSzJPrZgA /tmp/tmp.l5xL2Stpc3 ++ return 0 + [[ percona/percona-backup-mongodb:2.9.1 == percona/percona-backup-mongodb:2.9.1 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HWpjP8am3N +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.gE9clLN6X2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HWpjP8am3N ++ cat /tmp/tmp.gE9clLN6X2 ++ rm /tmp/tmp.HWpjP8am3N /tmp/tmp.gE9clLN6X2 ++ return 0 + [[ percona/pmm-client:2.44.1-1 == percona/pmm-client:2.44.1-1 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZxzSUm1mDH +++ mktemp ++ local LAST_ERR=/tmp/tmp.8QXu14kZX0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZxzSUm1mDH ++ cat /tmp/tmp.8QXu14kZX0 ++ rm /tmp/tmp.ZxzSUm1mDH /tmp/tmp.8QXu14kZX0 ++ return 0 + [[ percona/percona-server-mongodb:7.0.18-11 == percona/percona-server-mongodb:7.0.18-11 ]] + : Operator image has been updated correctly + compare_generation 1 statefulset upgrade-rs0 + local generation=1 + local resource=statefulset + local name=upgrade-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gvEVdBfkJp +++ mktemp ++ local LAST_ERR=/tmp/tmp.oarAW6t6XL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gvEVdBfkJp ++ cat /tmp/tmp.oarAW6t6XL ++ rm /tmp/tmp.gvEVdBfkJp /tmp/tmp.oarAW6t6XL ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 psmdb upgrade + local generation=1 + local resource=psmdb + local name=upgrade + local current_generation ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Le4Rb4fsw6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rOLlA2jqQY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Le4Rb4fsw6 ++ cat /tmp/tmp.rOLlA2jqQY ++ rm /tmp/tmp.Le4Rb4fsw6 /tmp/tmp.rOLlA2jqQY ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + desc 'patch psmdb images and upgrade' + set +o xtrace ----------------------------------------------------------------------------------- patch psmdb images and upgrade ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb upgrade --type=merge --patch '{ "spec": { "crVersion": "1.21.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' ++ mktemp + local LAST_OUT=/tmp/tmp.LFH89MW5zJ ++ mktemp + local LAST_ERR=/tmp/tmp.0zDHmoNC7h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb upgrade --type=merge --patch '{ "spec": { "crVersion": "1.21.0", "image": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "pmm": { "image": "perconalab/pmm-client:dev-latest" }, "backup": { "image": "perconalab/percona-server-mongodb-operator:main-backup" } }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LFH89MW5zJ 
perconaservermongodb.psmdb.percona.com/upgrade patched + cat /tmp/tmp.0zDHmoNC7h + rm /tmp/tmp.LFH89MW5zJ /tmp/tmp.0zDHmoNC7h + return 0 + sleep 10 + desc 'check cluster after full upgrade' + set +o xtrace ----------------------------------------------------------------------------------- check cluster after full upgrade ----------------------------------------------------------------------------------- + wait_for_running upgrade-rs0 3 + local name=upgrade-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=upgrade ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod upgrade-rs0-0 + local pod=upgrade-rs0-0 + set +o xtrace waiting for pod/upgrade-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod upgrade-rs0-1 + local pod=upgrade-rs0-1 + set +o xtrace waiting for pod/upgrade-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JMmgbrpkZf +++ mktemp ++ local LAST_ERR=/tmp/tmp.XKxBFZMHqS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JMmgbrpkZf ++ cat /tmp/tmp.XKxBFZMHqS ++ rm /tmp/tmp.JMmgbrpkZf /tmp/tmp.XKxBFZMHqS ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod upgrade-rs0-2 + local pod=upgrade-rs0-2 + set +o xtrace waiting for pod/upgrade-rs0-2 to be ready.OK ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.44U7QRqiYW +++ mktemp ++ local LAST_ERR=/tmp/tmp.SXOC1pTadM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.44U7QRqiYW ++ cat /tmp/tmp.SXOC1pTadM ++ rm /tmp/tmp.44U7QRqiYW /tmp/tmp.SXOC1pTadM ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hazoBXrQqB +++ mktemp ++ local LAST_ERR=/tmp/tmp.DZZVWvIbY8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hazoBXrQqB ++ cat /tmp/tmp.DZZVWvIbY8 ++ rm /tmp/tmp.hazoBXrQqB /tmp/tmp.DZZVWvIbY8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness...................................... 
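At this point the psmdb custom resource has been patched to crVersion 1.21.0 with the new mongod, PMM and backup images, and the test waits for the replica set to roll and settle before checking it. A minimal sketch of such a consistency wait, assuming a simplified standalone loop rather than the suite's wait_cluster_consistency helper (the retry count and sleep interval here are illustrative):

# Poll the psmdb status until the operator reports the cluster ready
# and all 3 rs0 members are counted as ready (the values seen in this run).
CLUSTER=upgrade
for _ in $(seq 1 120); do
    state=$(kubectl get psmdb "${CLUSTER}" -o jsonpath='{.status.state}')
    ready=$(kubectl get psmdb "${CLUSTER}" -o jsonpath='{.status.replsets.rs0.ready}')
    [ "${state}" = "ready" ] && [ "${ready}" = "3" ] && break
    sleep 5
done
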
+ wait_cluster_consistency upgrade + local retry=0 ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aBObjQnBNf +++ mktemp ++ local LAST_ERR=/tmp/tmp.asvJXSohmw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aBObjQnBNf ++ cat /tmp/tmp.asvJXSohmw ++ rm /tmp/tmp.aBObjQnBNf /tmp/tmp.asvJXSohmw ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vp4dpOds6N +++ mktemp ++ local LAST_ERR=/tmp/tmp.R0qIOwBIOy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vp4dpOds6N ++ cat /tmp/tmp.R0qIOwBIOy ++ rm /tmp/tmp.vp4dpOds6N /tmp/tmp.R0qIOwBIOy ++ return 0 + [[ 3 == \3 ]] + simple_data_check upgrade-rs0 3 + local cluster_name=upgrade-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 + local command=find + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:43:44+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.ThbGbcFHlI +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kqri4P1m9v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ThbGbcFHlI ++ cat /tmp/tmp.Kqri4P1m9v ++ rm /tmp/tmp.ThbGbcFHlI /tmp/tmp.Kqri4P1m9v ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.uxOnepU7nV ++ mktemp + local LAST_ERR=/tmp/tmp.quAo4hizJk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uxOnepU7nV + cat /tmp/tmp.quAo4hizJk + rm /tmp/tmp.uxOnepU7nV /tmp/tmp.quAo4hizJk + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find.json /tmp/tmp.DvEhBb5T17/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 + local command=find + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:43:46+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZfzlYk6xEz +++ mktemp ++ local LAST_ERR=/tmp/tmp.H62bNqw0mZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZfzlYk6xEz ++ cat /tmp/tmp.H62bNqw0mZ ++ rm /tmp/tmp.ZfzlYk6xEz /tmp/tmp.H62bNqw0mZ ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DtBFq6alkb ++ mktemp + local LAST_ERR=/tmp/tmp.IieBLwunpE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DtBFq6alkb + cat /tmp/tmp.IieBLwunpE + rm /tmp/tmp.DtBFq6alkb /tmp/tmp.IieBLwunpE + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find.json /tmp/tmp.DvEhBb5T17/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 + local command=find + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:43:49+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.3dVcmo5Qx5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mSYRFT8bOa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3dVcmo5Qx5 ++ cat /tmp/tmp.mSYRFT8bOa ++ rm /tmp/tmp.3dVcmo5Qx5 /tmp/tmp.mSYRFT8bOa ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.efHeEW9M2a ++ mktemp + local LAST_ERR=/tmp/tmp.kqKJN3XHtN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.efHeEW9M2a + cat /tmp/tmp.kqKJN3XHtN + rm /tmp/tmp.efHeEW9M2a /tmp/tmp.kqKJN3XHtN + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find.json /tmp/tmp.DvEhBb5T17/find + check_applied_images all + local updated_image=all + case "${updated_image}" in ++ kubectl_bin get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EUH3teD3BP +++ mktemp ++ local LAST_ERR=/tmp/tmp.mx4xY2T3HH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n psmdb-operator --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[*].spec.containers[?(@.name == "percona-server-mongodb-operator")].image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EUH3teD3BP ++ cat /tmp/tmp.mx4xY2T3HH ++ rm /tmp/tmp.EUH3teD3BP /tmp/tmp.mx4xY2T3HH ++ return 0 + [[ perconalab/percona-server-mongodb-operator:PR-1961-f6beb261 == perconalab/percona-server-mongodb-operator:PR-1961-f6beb261 ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.backup.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XgXD1GaupJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.1ybZlf8hnj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) 
++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.backup.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XgXD1GaupJ ++ cat /tmp/tmp.1ybZlf8hnj ++ rm /tmp/tmp.XgXD1GaupJ /tmp/tmp.1ybZlf8hnj ++ return 0 + [[ perconalab/percona-server-mongodb-operator:main-backup == perconalab/percona-server-mongodb-operator:main-backup ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mURaoNcjph +++ mktemp ++ local LAST_ERR=/tmp/tmp.y37Ka0U5yW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.pmm.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mURaoNcjph ++ cat /tmp/tmp.y37Ka0U5yW ++ rm /tmp/tmp.mURaoNcjph /tmp/tmp.y37Ka0U5yW ++ return 0 + [[ perconalab/pmm-client:dev-latest == perconalab/pmm-client:dev-latest ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.spec.image}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u8FCBT8bmj +++ mktemp ++ local LAST_ERR=/tmp/tmp.fHPoL2gr3L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.spec.image}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u8FCBT8bmj ++ cat /tmp/tmp.fHPoL2gr3L ++ rm /tmp/tmp.u8FCBT8bmj /tmp/tmp.fHPoL2gr3L ++ return 0 + [[ perconalab/percona-server-mongodb-operator:main-mongod7.0 == perconalab/percona-server-mongodb-operator:main-mongod7.0 ]] + : Cluster images have been updated correctly + compare_generation 2 statefulset upgrade-rs0 + local generation=2 + local resource=statefulset + local name=upgrade-rs0 + local current_generation ++ kubectl_bin get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.up70nN8PYm +++ mktemp ++ local LAST_ERR=/tmp/tmp.CR2coNFt42 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset upgrade-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.up70nN8PYm ++ cat /tmp/tmp.CR2coNFt42 ++ rm /tmp/tmp.up70nN8PYm /tmp/tmp.CR2coNFt42 ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 psmdb upgrade + local generation=2 + local resource=psmdb + local name=upgrade + local current_generation ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l0YrBQGMBV +++ mktemp ++ local LAST_ERR=/tmp/tmp.3YQpBslJIP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l0YrBQGMBV ++ cat /tmp/tmp.3YQpBslJIP ++ rm /tmp/tmp.l0YrBQGMBV /tmp/tmp.3YQpBslJIP ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + desc 'drop collection and do restore with new version' + set +o xtrace ----------------------------------------------------------------------------------- drop collection and do restore with new version ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@upgrade-rs0.upgrade-18945 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@upgrade-rs0.upgrade-18945 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y2L59krKTK +++ mktemp ++ local LAST_ERR=/tmp/tmp.ivxiUoIHDT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y2L59krKTK ++ cat /tmp/tmp.ivxiUoIHDT ++ rm /tmp/tmp.y2L59krKTK /tmp/tmp.ivxiUoIHDT ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.eYojbTtEvg ++ mktemp + local LAST_ERR=/tmp/tmp.v30yQqnmE7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eYojbTtEvg Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2c347581-253d-4df1-a809-6ae42ea56fd8") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.v30yQqnmE7 + rm /tmp/tmp.eYojbTtEvg /tmp/tmp.v30yQqnmE7 + return 0 + check_backup_in_storage backup-minio minio rs0 myApp.test.gz + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=myApp.test.gz + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ local backup_name=backup-minio ++ sed -e 's/.json$//' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.igKyeh2kNQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.gNlJvCTBG4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.igKyeh2kNQ ++ cat /tmp/tmp.gNlJvCTBG4 ++ rm /tmp/tmp.igKyeh2kNQ /tmp/tmp.gNlJvCTBG4 ++ return 0 + backup_dest=operator-testing/2025-08-14T11:40:47Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-14T11:40:47Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.o53Dt7qYvm ++ mktemp + local LAST_ERR=/tmp/tmp.zn6qVsi9xy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli 
--image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-14T11:40:47Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o53Dt7qYvm + cat /tmp/tmp.zn6qVsi9xy + rm /tmp/tmp.o53Dt7qYvm /tmp/tmp.zn6qVsi9xy + return 0 2025-08-14 11:40:51 55 myApp.test.gz + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@upgrade-rs0.upgrade-18945 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@upgrade-rs0.upgrade-18945 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YjKJNOHcoj +++ mktemp ++ local LAST_ERR=/tmp/tmp.NuWGbHOlId ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YjKJNOHcoj ++ cat /tmp/tmp.NuWGbHOlId ++ rm /tmp/tmp.YjKJNOHcoj /tmp/tmp.NuWGbHOlId ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NqXBURKzyi ++ mktemp + local LAST_ERR=/tmp/tmp.tiUueSE5ON + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NqXBURKzyi Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local:27017,upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("931184c3-44a8-431c-9c04-015ae17f10dc") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tiUueSE5ON + rm /tmp/tmp.NqXBURKzyi /tmp/tmp.tiUueSE5ON + return 0 + compare_mongo_cmd find myApp:myPass@upgrade-rs0.upgrade-18945 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@upgrade-rs0.upgrade-18945 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:44:08+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0.upgrade-18945 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0.upgrade-18945 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fQvmOzeVZ3 egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.zpTBOwjUOh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fQvmOzeVZ3 ++ cat /tmp/tmp.zpTBOwjUOh ++ rm /tmp/tmp.fQvmOzeVZ3 /tmp/tmp.zpTBOwjUOh ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.reipRTa5zD ++ mktemp + local LAST_ERR=/tmp/tmp.PYOpZu5WNf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.reipRTa5zD + cat /tmp/tmp.PYOpZu5WNf + rm /tmp/tmp.reipRTa5zD /tmp/tmp.PYOpZu5WNf + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find-2nd.json /tmp/tmp.DvEhBb5T17/find-2nd + run_restore backup-minio + local backup_name=backup-minio + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TjRvSYw8B4 ++ mktemp + local LAST_ERR=/tmp/tmp.M00zSgFjqA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TjRvSYw8B4 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.M00zSgFjqA + rm /tmp/tmp.TjRvSYw8B4 /tmp/tmp.M00zSgFjqA + return 0 + wait_restore backup-minio upgrade + local backup_name=backup-minio + local cluster_name=upgrade + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "ready" OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency upgrade + local retry=0 ++ kubectl_bin get psmdb upgrade -o 
'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1MKmFa9Ml6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NEJWWdSeXS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1MKmFa9Ml6 ++ cat /tmp/tmp.NEJWWdSeXS ++ rm /tmp/tmp.1MKmFa9Ml6 /tmp/tmp.NEJWWdSeXS ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X5bzqGGoQK +++ mktemp ++ local LAST_ERR=/tmp/tmp.NLfpZ7FOTW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb upgrade -o 'jsonpath={.status.replsets.rs0.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X5bzqGGoQK ++ cat /tmp/tmp.NLfpZ7FOTW ++ rm /tmp/tmp.X5bzqGGoQK /tmp/tmp.NLfpZ7FOTW ++ return 0 + [[ 3 == \3 ]] + simple_data_check upgrade-rs0 3 + local cluster_name=upgrade-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 + local command=find + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:44:25+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.tnmbJbJ1F8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fo0yXl3Fsf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tnmbJbJ1F8 ++ cat /tmp/tmp.fo0yXl3Fsf ++ rm /tmp/tmp.tnmbJbJ1F8 /tmp/tmp.fo0yXl3Fsf ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UZ0M3cZQlL ++ mktemp + local LAST_ERR=/tmp/tmp.pxRagmujZD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' 
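The simple_data_check in this part of the trace loops over the three rs0 pods; each compare_mongo_cmd execs the mongo shell inside the psmdb-client pod, filters volatile output, and diffs the result against a fixture stored in the repo. A simplified sketch of that normalize-and-diff pattern, reusing the pod name, URI and fixture path observed in this run (the real helper also builds the URI per replica set and wraps the calls in retries):

POD=psmdb-client-66f577db5f-wmvgp                  # client pod observed in this run
URI='myApp:myPass@upgrade-rs0-0.upgrade-rs0.upgrade-18945.svc.cluster.local'
FIXTURE=e2e-tests/upgrade/compare/find.json        # expected query output in the repo

# Run the query in the client pod, drop noisy shell/network lines,
# mask ObjectIds and namespace suffixes, then compare with the fixture.
kubectl exec "${POD}" -- bash -c \
    "printf 'use myApp\n db.test.find()\n' | mongo 'mongodb://${URI}/admin?ssl=false&replicaSet=rs0'" \
  | grep -Ev 'NETWORK|connecting to:|Implicit session:|versions do not match|Percona Server for MongoDB' \
  | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+\.svc/-xxx.svc/' \
  > /tmp/find.out

diff -u "${FIXTURE}" /tmp/find.out                 # any difference fails the check
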
+ exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UZ0M3cZQlL + cat /tmp/tmp.pxRagmujZD + rm /tmp/tmp.UZ0M3cZQlL /tmp/tmp.pxRagmujZD + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find.json /tmp/tmp.DvEhBb5T17/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 + local command=find + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:44:27+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.6aFteCzrvm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vbxdoz1c92 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6aFteCzrvm ++ cat /tmp/tmp.Vbxdoz1c92 ++ rm /tmp/tmp.6aFteCzrvm /tmp/tmp.Vbxdoz1c92 ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zC1K6VU20Z ++ mktemp + local LAST_ERR=/tmp/tmp.tKuXWThoCG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-1.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zC1K6VU20Z + cat /tmp/tmp.tKuXWThoCG + rm /tmp/tmp.zC1K6VU20Z /tmp/tmp.tKuXWThoCG + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find.json /tmp/tmp.DvEhBb5T17/find + for i in $(seq 0 $last_pod) + compare_mongo_cmd find myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 + local command=find + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-14T11:44:30+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.FxyMo3VU6Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.vKGVrqtGXm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FxyMo3VU6Q ++ cat /tmp/tmp.vKGVrqtGXm ++ rm /tmp/tmp.FxyMo3VU6Q /tmp/tmp.vKGVrqtGXm ++ return 0 + local client_container=psmdb-client-66f577db5f-wmvgp + local mongo_flag= + [[ myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TOf7eUPc7B ++ mktemp + local LAST_ERR=/tmp/tmp.zS1PRoJ2ev + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-wmvgp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@upgrade-rs0-2.upgrade-rs0.upgrade-18945.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TOf7eUPc7B + cat /tmp/tmp.zS1PRoJ2ev + rm /tmp/tmp.TOf7eUPc7B /tmp/tmp.zS1PRoJ2ev + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/upgrade/compare/find.json /tmp/tmp.DvEhBb5T17/find + desc cleanup + set +o xtrace ----------------------------------------------------------------------------------- cleanup ----------------------------------------------------------------------------------- + destroy upgrade-18945 + local namespace=upgrade-18945 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.f5xVi44pfb +++ mktemp ++ local LAST_ERR=/tmp/tmp.ICga0c4bME ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f5xVi44pfb ++ cat /tmp/tmp.ICga0c4bME ++ rm /tmp/tmp.f5xVi44pfb /tmp/tmp.ICga0c4bME ++ return 0 + '[' 1 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.lg8ieh62lt ++ mktemp + local LAST_ERR=/tmp/tmp.rf07CSvosF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lg8ieh62lt NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-minio upgrade minio s3://operator-testing/2025-08-14T11:40:47Z logical ready 3m42s 3m48s + cat /tmp/tmp.rf07CSvosF + rm /tmp/tmp.lg8ieh62lt /tmp/tmp.rf07CSvosF + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.7T98bgERPD ++ mktemp + local LAST_ERR=/tmp/tmp.3teZjb3cCy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7T98bgERPD perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted + cat /tmp/tmp.3teZjb3cCy + rm /tmp/tmp.7T98bgERPD /tmp/tmp.3teZjb3cCy + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.mngrhPlMBf ++ mktemp + local LAST_ERR=/tmp/tmp.mgpgOpbTUF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mngrhPlMBf customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.mgpgOpbTUF + rm /tmp/tmp.mngrhPlMBf /tmp/tmp.mgpgOpbTUF + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.07nYJ45410 ++ mktemp + local LAST_ERR=/tmp/tmp.AwNTIqZXLC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 
']' + break + cat /tmp/tmp.07nYJ45410 + cat /tmp/tmp.AwNTIqZXLC + rm /tmp/tmp.07nYJ45410 /tmp/tmp.AwNTIqZXLC + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qizsBar6Hf ++ mktemp + local LAST_ERR=/tmp/tmp.9LVEKB0Piy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qizsBar6Hf + cat /tmp/tmp.9LVEKB0Piy + rm /tmp/tmp.qizsBar6Hf /tmp/tmp.9LVEKB0Piy + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n upgrade-18945 upgrade --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/upgrade patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jR6fSfibsN ++ mktemp + local LAST_ERR=/tmp/tmp.tzpD2TbpdY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jR6fSfibsN customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.tzpD2TbpdY + rm /tmp/tmp.jR6fSfibsN /tmp/tmp.tzpD2TbpdY + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.MYAyXJia9g ++ mktemp + local LAST_ERR=/tmp/tmp.655gAe63fx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MYAyXJia9g clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.655gAe63fx + rm /tmp/tmp.MYAyXJia9g /tmp/tmp.655gAe63fx + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.N3rgBHIL9j ++ mktemp + local LAST_ERR=/tmp/tmp.Oxup3hWuYv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.N3rgBHIL9j + cat /tmp/tmp.Oxup3hWuYv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.N3rgBHIL9j + cat /tmp/tmp.Oxup3hWuYv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.N3rgBHIL9j + cat /tmp/tmp.Oxup3hWuYv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error 
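Each retry prints the same NotFound listing shown above: the manifest deletion is attempted three times by the suite's kubectl wrapper (kubectl_bin), which captures stdout and stderr in mktemp files, re-runs the command on a non-zero exit, and sleeps 0, 4 and 8 seconds between attempts. A minimal sketch of that pattern follows; the function name and details are hypothetical, not the suite's actual helper.

# Hedged sketch of the retry/backoff wrapper visible in the trace (hypothetical name).
retry_kubectl() {
    local out err rc attempt
    out=$(mktemp)
    err=$(mktemp)
    for attempt in 0 1 2; do
        set +e
        kubectl "$@" >"$out" 2>"$err"   # attempt the command, keeping stdout/stderr
        rc=$?
        set -e
        cat "$out" "$err"               # echo captured output into the log
        if [ "$rc" -eq 0 ]; then
            rm -f "$out" "$err"
            return 0
        fi
        sleep $((attempt * 4))          # 0s, 4s, 8s between attempts, as in the trace
    done
    rm -f "$out" "$err"
    return 1
}

Callers that tolerate failure, as this teardown does, append "|| true", e.g. retry_kubectl delete -f cert-manager.yaml || true.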
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.N3rgBHIL9j + cat /tmp/tmp.Oxup3hWuYv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io 
"cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.N3rgBHIL9j /tmp/tmp.Oxup3hWuYv + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace upgrade-18945 + rm -rf /tmp/tmp.DvEhBb5T17 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.XS84Sb9yTz + local LAST_OUT=/tmp/tmp.W2YKXWl6c8 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Fj0jFt7YSO + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.IvgKtzkGRh + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace upgrade-18945