Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/upgrade-sharded.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + cluster=upgrade-sharded + CLUSTER_SIZE=3 + TARGET_OPERATOR_VER=1.21.0 + TARGET_IMAGE=perconalab/percona-server-mongodb-operator:PR-1961-970eeaa4 + TARGET_IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod7.0 + TARGET_IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest + TARGET_IMAGE_BACKUP=perconalab/percona-server-mongodb-operator:main-backup ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ local 'cli=mongod --version' +++ local pod_name=17886 +++ kubectl_bin -n default run 17886 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.I4DB1oqnBh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zo8yIwj1qm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default run 17886 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.I4DB1oqnBh +++ cat /tmp/tmp.zo8yIwj1qm +++ rm /tmp/tmp.I4DB1oqnBh /tmp/tmp.zo8yIwj1qm +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/17886 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OEBeeDKHFs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lnVIThn6V0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/17886 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OEBeeDKHFs +++ cat /tmp/tmp.lnVIThn6V0 +++ rm /tmp/tmp.OEBeeDKHFs /tmp/tmp.lnVIThn6V0 +++ return 0 ++++ kubectl_bin -n default exec 17886 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.6saanzZyFb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pquIxMM3wm ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default exec 17886 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.6saanzZyFb ++++ cat /tmp/tmp.pquIxMM3wm ++++ rm /tmp/tmp.6saanzZyFb /tmp/tmp.pquIxMM3wm ++++ return 0 +++ local 'output=db version v7.0.22-12 Build Info: { "version": "7.0.22-12", "gitVersion": "dba13d69ddf3a3457b6e155d33709fd8bbf41a96", "openSSLVersion": "OpenSSL 3.2.2 4 Jun 2024", "modules": [], "proFeatures": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/17886 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wJGFlACaOG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oAoXIySWKz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 
0 2)' +++ set +e +++ kubectl -n default delete pod/17886 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wJGFlACaOG +++ cat /tmp/tmp.oAoXIySWKz Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.wJGFlACaOG /tmp/tmp.oAoXIySWKz +++ return 0 +++ echo db version v7.0.22-12 Build Info: '{' '"version":' '"7.0.22-12",' '"gitVersion":' '"dba13d69ddf3a3457b6e155d33709fd8bbf41a96",' '"openSSLVersion":' '"OpenSSL' 3.2.2 4 Jun '2024",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=7.0.22-12 ++ [[ ! 7.0.22-12 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 7.0.22-12 + FULL_VER=7.0.22-12 + MONGO_VER=7.0 ++ curl -s https://check.percona.com/versions/v1/psmdb-operator ++ sort -V ++ jq -r '.versions[].operator' ++ tail -n1 + INIT_OPERATOR_VER=1.20.1 + [[ 1.20.1 == \1\.\2\1\.\0 ]] + GIT_TAG=v1.20.1 + case $(curnamespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view created clusterrole.rbac.authorization.k8s.io/cert-manager-view created clusterrole.rbac.authorization.k8s.io/cert-manager-edit created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created 
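For orientation, the two version lookups traced near the top of this log reduce to roughly the sketch below. The image name, URLs, and the sed/jq filters are the ones that appear in the trace; the pod name and variable names are illustrative, and the test's own helpers wrap these calls in retries.

    image=perconalab/percona-server-mongodb-operator:main-mongod7.0

    # Throwaway pod to read the mongod banner (pod name "ver-probe" is illustrative;
    # the test uses a random numeric name such as 17886).
    kubectl -n default run ver-probe --image="${image}" --restart=Never --command -- sleep infinity
    kubectl -n default wait --for=condition=Ready pod/ver-probe
    out=$(kubectl -n default exec ver-probe -- bash -c 'mongod --version 2>&1')
    kubectl -n default delete pod/ver-probe --grace-period=0 --force

    # Unquoted echo flattens the banner to one line, so the sed from the trace
    # reduces it to the bare version string (7.0.22-12 in this run).
    full_ver=$(echo ${out} | sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g')

    # Latest released operator from the Percona version service (1.20.1 here).
    init_operator_ver=$(curl -s https://check.percona.com/versions/v1/psmdb-operator \
        | jq -r '.versions[].operator' | sort -V | tail -n1)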
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created role.rbac.authorization.k8s.io/cert-manager:leaderelection created role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/cert-manager-6687d8765c-dqgtc condition met pod/cert-manager-cainjector-764498cfc8-jrqm7 condition met pod/cert-manager-webhook-74c74b87d7-cj7qs condition met ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- Error: uninstall: Release not loaded: minio-service: release: not found "minio" has been removed from your repositories "minio" has been added to your repositories NAME: minio-service LAST DEPLOYED: Wed Aug 6 22:21:00 2025 NAMESPACE: upgrade-sharded-21420 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.upgrade-sharded-21420.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-sharded-21420 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-sharded-21420 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. 
export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-sharded-21420 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-sharded-21420 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local waiting for pod/minio-service-86dfccd949-6m67s to be ready.OK service/minio-service created make_bucket: operator-testing pod "aws-cli" deleted If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_upgrade-sharded-21420 ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- deployment.apps/psmdb-client created secret/my-cluster-name-secrets created ----------------------------------------------------------------------------------- create first PSMDB cluster upgrade-sharded ----------------------------------------------------------------------------------- perconaservermongodb.psmdb.percona.com/upgrade-sharded created ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- waiting for pod/upgrade-sharded-rs0-0 to be ready..................OK waiting for pod/upgrade-sharded-rs0-1 to be readyOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' + IMAGE_PMM_CLIENT=percona/pmm-client:2.44.1 ++ echo '{"versions":[{"product":"psmdb-operator", "operator":"1.20.1", "matrix":{"mongod":{"7.0.18-11":{"imagePath":"percona/percona-server-mongodb:7.0.18-11", "imageHash":"0115a72f5e60d86cb4f4b7eae32118c0910e8c96831e013de12798a1771c4c91", "imageHashArm64":"86c17067f3e233f522612389ed2500231cbb22ce93524c476b9aa8d464d06f0b", "status":"recommended", "critical":false}}, "pxc":{}, "pmm":{"2.44.1":{"imagePath":"percona/pmm-client:2.44.1", "imageHash":"8b2eaddffd626f02a2d5318ffebc0c277fe8457da6083b8cfcada9b6e6168616", "imageHashArm64":"337fecd4afdb3f6daf2caa2b341b9fe41d0418a0e4ec76980c7f29be9d08b5ea", "status":"available", "critical":false}}, "proxysql":{}, "haproxy":{}, "backup":{"2.9.1":{"imagePath":"percona/percona-backup-mongodb:2.9.1", "imageHash":"976bfbaa548eb70dd90bf0bd2dcfe40b2994d749ef644af3a0590f4856e4d7e2", "imageHashArm64":"ebc6e5c5aa3ed97991d3fd90e9201597b485ddc0eae8d7ee4311ecb785c03bf0", "status":"recommended", "critical":false}}, "operator":{"1.20.1":{"imagePath":"percona/percona-server-mongodb-operator:1.20.1", "imageHash":"b559cdd349916d806f6b13b4ac43fdbae982298fad2088b649631a356020ee46", "imageHashArm64":"5a66e497dd1650e5a1123659292fe4c615e0ab5ce7e5d8437bf2101f91b625e1", "status":"recommended", "critical":false}}, "logCollector":{}, "postgresql":{}, "pgbackrest":{}, "pgbackrestRepo":{}, "pgbadger":{}, "pgbouncer":{}, "pxcOperator":{}, "psmdbOperator":{}, "pgOperatorApiserver":{}, "pgOperatorEvent":{}, "pgOperatorRmdata":{}, "pgOperatorScheduler":{}, "pgOperator":{}, "pgOperatorDeployer":{}, "psOperator":{}, "mysql":{}, "router":{}, "orchestrator":{}, "toolkit":{}, "postgis":{}}}]}' ++ jq -r '.versions[].matrix.backup[].imagePath' + IMAGE_BACKUP=percona/percona-backup-mongodb:2.9.1 + [[ 1.21.0 == \1\.\2\0\.\1 ]] + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra_gh 
upgrade-sharded-21420 v1.20.1 + local ns=upgrade-sharded-21420 + local git_tag=v1.20.1 + check_crd_for_deletion v1.20.1 + local git_tag=v1.20.1 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5wO3hEET0z +++ mktemp ++ local LAST_ERR=/tmp/tmp.nGMbQvV6rB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaservermongodbbackups.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5wO3hEET0z ++ cat /tmp/tmp.nGMbQvV6rB ++ rm /tmp/tmp.5wO3hEET0z /tmp/tmp.nGMbQvV6rB ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xlwLUtlvvp +++ mktemp ++ local LAST_ERR=/tmp/tmp.TrUshcKaYy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaservermongodbrestores.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xlwLUtlvvp ++ cat /tmp/tmp.TrUshcKaYy ++ rm /tmp/tmp.xlwLUtlvvp /tmp/tmp.TrUshcKaYy ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5eI8WVIZ0x +++ mktemp ++ local LAST_ERR=/tmp/tmp.IpQHyatb6H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/perconaservermongodbs.psmdb.percona.com -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5eI8WVIZ0x ++ cat /tmp/tmp.IpQHyatb6H ++ rm /tmp/tmp.5eI8WVIZ0x /tmp/tmp.IpQHyatb6H ++ return 0 + [[ Established == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl 
delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + awk '{print$1}' + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.AnRzZOFdcp ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.IC2ZOh3bim + local LAST_ERR=/tmp/tmp.CSsA37UsZL + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_ERR=/tmp/tmp.NJBloYwjpr + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AnRzZOFdcp + cat /tmp/tmp.CSsA37UsZL + rm /tmp/tmp.AnRzZOFdcp /tmp/tmp.CSsA37UsZL + return 0 namespace "cert-manager" deleted namespace "upgrade-sharded-286" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IC2ZOh3bim namespace "psmdb-operator" deleted + cat /tmp/tmp.NJBloYwjpr + rm /tmp/tmp.IC2ZOh3bim /tmp/tmp.NJBloYwjpr + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bjA7hzx8At ++ mktemp + local LAST_ERR=/tmp/tmp.CZMiABmZnM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bjA7hzx8At + cat /tmp/tmp.CZMiABmZnM + rm /tmp/tmp.bjA7hzx8At /tmp/tmp.CZMiABmZnM + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.n3YumbtIlS ++ mktemp + local 
LAST_ERR=/tmp/tmp.Dg82nHZy4S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n3YumbtIlS namespace/psmdb-operator created + cat /tmp/tmp.Dg82nHZy4S + rm /tmp/tmp.n3YumbtIlS /tmp/tmp.Dg82nHZy4S + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q3I1N0VZjD +++ mktemp ++ local LAST_ERR=/tmp/tmp.xE6OfhNKio ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q3I1N0VZjD ++ cat /tmp/tmp.xE6OfhNKio ++ rm /tmp/tmp.Q3I1N0VZjD /tmp/tmp.xE6OfhNKio ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster9 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WdOjxai1tm ++ mktemp + local LAST_ERR=/tmp/tmp.bMrfEF9US0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster9 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WdOjxai1tm Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster9" modified. + cat /tmp/tmp.bMrfEF9US0 + rm /tmp/tmp.WdOjxai1tm /tmp/tmp.bMrfEF9US0 + return 0 + deploy_operator_gh v1.20.1 + local git_tag=v1.20.1 + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/crd.yaml --server-side ++ mktemp + local LAST_OUT=/tmp/tmp.pgS1Cf2ghz ++ mktemp + local LAST_ERR=/tmp/tmp.Dvttt8zpiZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/crd.yaml --server-side + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pgS1Cf2ghz customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.Dvttt8zpiZ + rm /tmp/tmp.pgS1Cf2ghz /tmp/tmp.Dvttt8zpiZ + return 0 + local rbac_yaml=rbac + local operator_yaml=operator + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac + operator_yaml=cw-operator + kubectl_bin apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qICc6WM0Q6 ++ mktemp + local LAST_ERR=/tmp/tmp.DD4Bah6jZx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cw-rbac.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qICc6WM0Q6 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator unchanged serviceaccount/percona-server-mongodb-operator created 
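The deploy_operator_gh step traced in this part of the log boils down to roughly the commands below. The manifest URLs, the --server-side flag, and the sed image substitution are taken from the trace; the temp-file handling is simplified and the temp path is illustrative.

    git_tag=v1.20.1
    kubectl apply --server-side -f "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml"
    kubectl apply -f "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/cw-rbac.yaml"

    # Fetch the cluster-wide operator manifest, pin the image under test's baseline, apply it.
    curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/cw-operator.yaml" \
        > "/tmp/cw-operator_${git_tag}.yaml"
    sed -i -e 's^image: .*^image: perconalab/percona-server-mongodb-operator:1.20.1^' "/tmp/cw-operator_${git_tag}.yaml"
    kubectl apply -f "/tmp/cw-operator_${git_tag}.yaml"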
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator unchanged + cat /tmp/tmp.DD4Bah6jZx + rm /tmp/tmp.qICc6WM0Q6 /tmp/tmp.DD4Bah6jZx + return 0 + curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.20.1/deploy/cw-operator.yaml + /usr/bin/sed -i -e 's^image: .*^image: perconalab\/percona-server-mongodb-operator:1.20.1^' /tmp/tmp.zTSJcV46Ka/cw-operator_v1.20.1.yaml + kubectl_bin apply -f /tmp/tmp.zTSJcV46Ka/cw-operator_v1.20.1.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.OjowB29tFr ++ mktemp + local LAST_ERR=/tmp/tmp.VMt7j7sO1N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /tmp/tmp.zTSJcV46Ka/cw-operator_v1.20.1.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OjowB29tFr deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.VMt7j7sO1N + rm /tmp/tmp.OjowB29tFr /tmp/tmp.VMt7j7sO1N + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0uj8rHCSqz +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z9jtYiAteS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0uj8rHCSqz ++ cat /tmp/tmp.Z9jtYiAteS ++ rm /tmp/tmp.0uj8rHCSqz /tmp/tmp.Z9jtYiAteS ++ return 0 + wait_pod percona-server-mongodb-operator-78d4cb8b56-2p944 + local pod=percona-server-mongodb-operator-78d4cb8b56-2p944 + set +o xtrace waiting for pod/percona-server-mongodb-operator-78d4cb8b56-2p944 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.3hwriRL6V5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cAjxpzUp8x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3hwriRL6V5 ++ cat /tmp/tmp.cAjxpzUp8x ++ rm /tmp/tmp.3hwriRL6V5 /tmp/tmp.cAjxpzUp8x ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-78d4cb8b56-2p944 ++ mktemp + local LAST_OUT=/tmp/tmp.LeX31da4yr ++ mktemp + local LAST_ERR=/tmp/tmp.qbdi0ojtAm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-78d4cb8b56-2p944 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LeX31da4yr + cat /tmp/tmp.qbdi0ojtAm + rm /tmp/tmp.LeX31da4yr /tmp/tmp.qbdi0ojtAm + return 0 2025-08-06T22:19:13.227Z INFO setup Manager starting up {"gitCommit": "30d9ec941baf57619c8973249a3c5d3fd5cc08f4", "gitBranch": "release-1-20-1", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"} + create_namespace upgrade-sharded-21420 + local namespace=upgrade-sharded-21420 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local 
chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-sharded-21420' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-sharded-21420 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-sharded-21420 --ignore-not-found + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.m4zHz1CPDR + local LAST_OUT=/tmp/tmp.tY01ptP2at ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.iU5SMmZjNp + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.pF5cb3eLey + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + for i in '$(seq 0 2)' + set +e + kubectl delete namespace upgrade-sharded-21420 --ignore-not-found + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tY01ptP2at + cat /tmp/tmp.pF5cb3eLey + rm /tmp/tmp.tY01ptP2at /tmp/tmp.pF5cb3eLey + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m4zHz1CPDR + cat /tmp/tmp.iU5SMmZjNp + rm /tmp/tmp.m4zHz1CPDR /tmp/tmp.iU5SMmZjNp + return 0 + kubectl_bin wait --for=delete namespace upgrade-sharded-21420 ++ mktemp + local LAST_OUT=/tmp/tmp.dpFIjF8HdB ++ mktemp + local LAST_ERR=/tmp/tmp.3zCD6I79BT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace upgrade-sharded-21420 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dpFIjF8HdB + cat /tmp/tmp.3zCD6I79BT + rm /tmp/tmp.dpFIjF8HdB /tmp/tmp.3zCD6I79BT + return 0 + desc 
'create namespace upgrade-sharded-21420' + set +o xtrace ----------------------------------------------------------------------------------- create namespace upgrade-sharded-21420 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-sharded-21420 ++ mktemp + local LAST_OUT=/tmp/tmp.QT0XEFZqLO ++ mktemp + local LAST_ERR=/tmp/tmp.2yRLvVgFTw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace upgrade-sharded-21420 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QT0XEFZqLO namespace/upgrade-sharded-21420 created + cat /tmp/tmp.2yRLvVgFTw + rm /tmp/tmp.QT0XEFZqLO /tmp/tmp.2yRLvVgFTw + return 0 + set_kube_ctx upgrade-sharded-21420 + local namespace=upgrade-sharded-21420 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.j0fieW5Q77 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1nw3Itfqj3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j0fieW5Q77 ++ cat /tmp/tmp.1nw3Itfqj3 ++ rm /tmp/tmp.j0fieW5Q77 /tmp/tmp.1nw3Itfqj3 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster9 --namespace=upgrade-sharded-21420 ++ mktemp + local LAST_OUT=/tmp/tmp.UKz9Dz22h4 ++ mktemp + local LAST_ERR=/tmp/tmp.HV2ROyiNOa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster9 --namespace=upgrade-sharded-21420 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UKz9Dz22h4 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster9" modified. 
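Most of the repetition in this trace comes from the kubectl_bin retry wrapper. The sketch below is a reconstruction based only on what the trace shows (three attempts, mktemp files for stdout/stderr, 0/4/8-second back-off); the real helper in the e2e-tests suite may differ in details.

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                  # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break                            # success: fall through to the final cat/cleanup
            fi
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            sleep $((i * timeout))               # 0s, 4s, 8s, matching the sleeps in the trace
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }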
+ cat /tmp/tmp.HV2ROyiNOa + rm /tmp/tmp.UKz9Dz22h4 /tmp/tmp.HV2ROyiNOa + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.yczaNJY5SV ++ mktemp + local LAST_ERR=/tmp/tmp.SOIYTwK6QN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yczaNJY5SV namespace/cert-manager created + cat /tmp/tmp.SOIYTwK6QN + rm /tmp/tmp.yczaNJY5SV /tmp/tmp.SOIYTwK6QN + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.SMOimdgcAR ++ mktemp + local LAST_ERR=/tmp/tmp.tV4HMJYa7K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SMOimdgcAR namespace/cert-manager labeled + cat /tmp/tmp.tV4HMJYa7K + rm /tmp/tmp.SMOimdgcAR /tmp/tmp.tV4HMJYa7K + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.ptf5I4Hfwm ++ mktemp + local LAST_ERR=/tmp/tmp.rW9nJl0hf3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ptf5I4Hfwm namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.rW9nJl0hf3 Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
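Condensed, the cert-manager bootstrap traced above is the following sequence; the commands, label, and manifest URL are taken from the trace (the test then sleeps before applying the cloud-storage secrets).

    kubectl create namespace cert-manager
    kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
    kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false
    kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready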
+ rm /tmp/tmp.ptf5I4Hfwm /tmp/tmp.rW9nJl0hf3 + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.IaZBvcqmXI ++ mktemp + local LAST_ERR=/tmp/tmp.GCrDTAbgSO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IaZBvcqmXI pod/cert-manager-6687d8765c-dwpzg condition met pod/cert-manager-cainjector-764498cfc8-6gbxc condition met pod/cert-manager-webhook-74c74b87d7-48wsf condition met + cat /tmp/tmp.GCrDTAbgSO + rm /tmp/tmp.IaZBvcqmXI /tmp/tmp.GCrDTAbgSO + return 0 + sleep 120 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hAUzce7V6m ++ mktemp + local LAST_ERR=/tmp/tmp.tDkraOF1ue + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hAUzce7V6m secret/minio-secret unchanged secret/aws-s3-secret unchanged secret/gcp-cs-secret unchanged secret/azure-secret unchanged + cat /tmp/tmp.tDkraOF1ue + rm /tmp/tmp.hAUzce7V6m /tmp/tmp.tDkraOF1ue + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service release "minio-service" uninstalled + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Wed Aug 6 22:21:48 2025 NAMESPACE: upgrade-sharded-21420 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from 
within your cluster: minio-service.upgrade-sharded-21420.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace upgrade-sharded-21420 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace upgrade-sharded-21420 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace upgrade-sharded-21420 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace upgrade-sharded-21420 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V0PNHfFg56 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wtkah6lPPv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V0PNHfFg56 ++ cat /tmp/tmp.Wtkah6lPPv ++ rm /tmp/tmp.V0PNHfFg56 /tmp/tmp.Wtkah6lPPv ++ return 0 + MINIO_POD=minio-service-86dfccd949-fprht + wait_pod minio-service-86dfccd949-fprht + local pod=minio-service-86dfccd949-fprht + set +o xtrace waiting for pod/minio-service-86dfccd949-fprht to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-21420.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.1HXi8vYvtB ++ mktemp + local LAST_ERR=/tmp/tmp.zBqG0lM9i8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-21420.svc.cluster.local --tcp=9000 + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.1HXi8vYvtB + cat /tmp/tmp.zBqG0lM9i8 error: failed to create ExternalName service: services "minio-service" already exists + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-21420.svc.cluster.local --tcp=9000 + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.1HXi8vYvtB + cat /tmp/tmp.zBqG0lM9i8 error: failed to create ExternalName service: services "minio-service" already exists + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.upgrade-sharded-21420.svc.cluster.local --tcp=9000 + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.1HXi8vYvtB + cat /tmp/tmp.zBqG0lM9i8 error: failed to create ExternalName service: services "minio-service" already exists + sleep 8 + cat /tmp/tmp.1HXi8vYvtB + cat /tmp/tmp.zBqG0lM9i8 error: failed to create ExternalName service: services "minio-service" already exists + rm /tmp/tmp.1HXi8vYvtB /tmp/tmp.zBqG0lM9i8 + return 1
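The MinIO setup that closes this excerpt is summarized below (several --set flags from the trace are omitted for brevity). The final kubectl create svc call is the one that fails above because the ExternalName service in the operator namespace is left over from an earlier run; the existence check shown here is only a suggestion, not something the test does.

    helm repo add minio https://charts.min.io/
    helm install minio-service --version 5.4.0 \
        --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi \
        --set rootUser=rootuser --set rootPassword=rootpass123 \
        --set service.type=ClusterIP --set persistence.size=2G \
        --set securityContext.enabled=false \
        minio/minio   # users[0].* and configPathmc settings from the trace omitted here

    # Guarding the ExternalName service creation would make this step idempotent
    # across re-runs (suggested change, not part of the current test).
    kubectl get svc -n psmdb-operator minio-service >/dev/null 2>&1 \
        || kubectl create svc -n psmdb-operator externalname minio-service \
            --external-name=minio-service.upgrade-sharded-21420.svc.cluster.local --tcp=9000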