Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/logs/replset-remapping-sharded.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ source_cluster=some-name-source
+ target_cluster=some-name-target
+ setup_infra
+ create_infra replset-remapping-sharded-30354
+ local ns=replset-remapping-sharded-30354
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.rdN2WoGqyD
++ mktemp
+ local LAST_ERR=/tmp/tmp.iP3CxCzl41
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rdN2WoGqyD
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.iP3CxCzl41
+ rm /tmp/tmp.rdN2WoGqyD /tmp/tmp.iP3CxCzl41
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
No resources found
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: resource(s) were provided, but no name was specified
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.2TlCb2231w
++ mktemp
+ local LAST_ERR=/tmp/tmp.0UPqtYBAIx
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2TlCb2231w
+ cat /tmp/tmp.0UPqtYBAIx
+ rm /tmp/tmp.2TlCb2231w /tmp/tmp.0UPqtYBAIx
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.G5D3Pdk6Vz
++ mktemp
+ local LAST_ERR=/tmp/tmp.IrUQ0x9wkd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.G5D3Pdk6Vz
+ cat /tmp/tmp.IrUQ0x9wkd
+ rm /tmp/tmp.G5D3Pdk6Vz /tmp/tmp.IrUQ0x9wkd
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.P1h6upcx5Y
++ mktemp
+ local LAST_ERR=/tmp/tmp.F0uggdesbi
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.P1h6upcx5Y
+ cat /tmp/tmp.F0uggdesbi
+ rm /tmp/tmp.P1h6upcx5Y /tmp/tmp.F0uggdesbi
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.MbeG8ob4wm
++ mktemp
+ local LAST_ERR=/tmp/tmp.gxpUeI4DMh
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.MbeG8ob4wm
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.gxpUeI4DMh
+ rm /tmp/tmp.MbeG8ob4wm /tmp/tmp.gxpUeI4DMh
+ return 0
+ check_crd_for_deletion PR-2225-9afc535d
+ local git_tag=PR-2225-9afc535d
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2225-9afc535d/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/sbin/sed s/---//g
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g')
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.4Xl4tpNkxM
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KnjNegcr0O
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.4Xl4tpNkxM
++ cat /tmp/tmp.KnjNegcr0O
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.4Xl4tpNkxM
++ cat /tmp/tmp.KnjNegcr0O
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.4Xl4tpNkxM
++ cat /tmp/tmp.KnjNegcr0O
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.4Xl4tpNkxM
++ cat /tmp/tmp.KnjNegcr0O
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.4Xl4tpNkxM /tmp/tmp.KnjNegcr0O
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ xargs kubectl delete ns
+ awk '{print$1}'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.T6CeboUKpz
++ mktemp
+ local LAST_OUT=/tmp/tmp.qAj4jn6AzW
++ mktemp
+ local LAST_ERR=/tmp/tmp.ngPspDCK1t
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.jLZsDTWrCV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.T6CeboUKpz
+ cat /tmp/tmp.ngPspDCK1t
+ rm /tmp/tmp.T6CeboUKpz /tmp/tmp.ngPspDCK1t
+ return 0
namespace "replset-remapping-sharded-877" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qAj4jn6AzW
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.jLZsDTWrCV
+ rm /tmp/tmp.qAj4jn6AzW /tmp/tmp.jLZsDTWrCV
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.lsbO5rPFRW
++ mktemp
+ local LAST_ERR=/tmp/tmp.RGJoy6pJjS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lsbO5rPFRW
+ cat /tmp/tmp.RGJoy6pJjS
+ rm /tmp/tmp.lsbO5rPFRW /tmp/tmp.RGJoy6pJjS
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.tdGuYnPa0u
++ mktemp
+ local LAST_ERR=/tmp/tmp.Dm8LhXWW4p
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.tdGuYnPa0u
namespace/psmdb-operator created
+ cat /tmp/tmp.Dm8LhXWW4p
+ rm /tmp/tmp.tdGuYnPa0u /tmp/tmp.Dm8LhXWW4p
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.EFSnB1jqu7
+++ mktemp
++ local LAST_ERR=/tmp/tmp.mj5owiUGK8
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.EFSnB1jqu7
++ cat /tmp/tmp.mj5owiUGK8
++ rm /tmp/tmp.EFSnB1jqu7 /tmp/tmp.mj5owiUGK8
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2225-9afc535d-10-cluster8 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.EbQXkOvI1Z
++ mktemp
+ local LAST_ERR=/tmp/tmp.UqUAxSoisL
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2225-9afc535d-10-cluster8 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EbQXkOvI1Z
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2225-9afc535d-10-cluster8" modified.
+ cat /tmp/tmp.UqUAxSoisL
+ rm /tmp/tmp.EbQXkOvI1Z /tmp/tmp.UqUAxSoisL
+ return 0
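Editorial aside: almost every command in this trace runs through the suite's kubectl_bin wrapper, which captures stdout/stderr into mktemp files and retries the call up to three times. The wrapper's source is not part of this log; the following is a minimal sketch reconstructed from the trace above (the backoff formula is inferred from the observed sleep 0 / sleep 4 / sleep 8 sequence and may differ from the real helper):

    # reconstructed sketch of the kubectl_bin retry wrapper seen in this trace
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR" >&2
                sleep $((timeout * i))   # observed: 0s, 4s, 8s between attempts
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }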
+ deploy_operator
+ desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2225-9afc535d'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2225-9afc535d
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.7n3gNN5gjV
++ mktemp
+ local LAST_ERR=/tmp/tmp.fuyytdNqAj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7n3gNN5gjV
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.fuyytdNqAj
+ rm /tmp/tmp.7n3gNN5gjV /tmp/tmp.fuyytdNqAj
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.bjnDZyEYUw
++ mktemp
+ local LAST_ERR=/tmp/tmp.IZx9HnlleJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bjnDZyEYUw
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.IZx9HnlleJ
+ rm /tmp/tmp.bjnDZyEYUw /tmp/tmp.IZx9HnlleJ
+ return 0
+ yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2225-9afc535d") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/cw-operator.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.sgqmiHPiMr
++ mktemp
+ local LAST_ERR=/tmp/tmp.l80TiTkI4Z
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.sgqmiHPiMr
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.l80TiTkI4Z
+ rm /tmp/tmp.sgqmiHPiMr /tmp/tmp.l80TiTkI4Z
+ return 0
+ sleep 20
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.cHzokcULe4
+++ mktemp
++ local LAST_ERR=/tmp/tmp.jesOFSdrg6
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.cHzokcULe4
++ cat /tmp/tmp.jesOFSdrg6
++ rm /tmp/tmp.cHzokcULe4 /tmp/tmp.jesOFSdrg6
++ return 0
+ wait_operator_pod percona-server-mongodb-operator-8dd675c98-dkdtj
+ local pod=percona-server-mongodb-operator-8dd675c98-dkdtj
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-8dd675c98-dkdtj to be ready.OK
+ echo 'Print operator info from log'
Print operator info from log
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+ grep 'Manager starting up'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.cbqv7kCcSs
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PqkEPwiLj6
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.cbqv7kCcSs
++ cat /tmp/tmp.PqkEPwiLj6
++ rm /tmp/tmp.cbqv7kCcSs /tmp/tmp.PqkEPwiLj6
++ return 0
+ kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-8dd675c98-dkdtj
++ mktemp
+ local LAST_OUT=/tmp/tmp.JS5ibjNdk4
++ mktemp
+ local LAST_ERR=/tmp/tmp.YfepfQDBif
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl logs -n psmdb-operator percona-server-mongodb-operator-8dd675c98-dkdtj
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JS5ibjNdk4
+ cat /tmp/tmp.YfepfQDBif
+ rm /tmp/tmp.JS5ibjNdk4 /tmp/tmp.YfepfQDBif
+ return 0
2026-02-10T13:11:43.429Z INFO setup Manager starting up {"gitCommit": "9afc535dcde138ea52c4200226a3c0880d2c31db", "gitBranch": "PR-2225-9afc535d", "buildTime": "", "goVersion": "go1.25.7", "os": "linux", "arch": "amd64"}
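Editorial aside: the operator Deployment just applied is rendered by piping deploy/cw-operator.yaml through yq to pin the PR image and flip two env vars. Pulled out of the trace and reformatted as a standalone command (assumes yq v4 syntax, which is what the suite uses):

    yq eval '
        (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2225-9afc535d") |
        ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
        ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
    ' deploy/cw-operator.yaml | kubectl apply -n psmdb-operator -f -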
+ create_namespace replset-remapping-sharded-30354
+ local namespace=replset-remapping-sharded-30354
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
++ grep validate-auth
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ awk '{print$1}'
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces replset-remapping-sharded-30354'
+ xargs kubectl delete ns
++ mktemp
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces replset-remapping-sharded-30354
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace replset-remapping-sharded-30354 --ignore-not-found
+ local LAST_OUT=/tmp/tmp.f9j9xfHx58
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.j1k20rSzKX
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_OUT=/tmp/tmp.eLVBvdNxVP
++ mktemp
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ local LAST_ERR=/tmp/tmp.gPywcbaxJw
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace replset-remapping-sharded-30354 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.f9j9xfHx58
+ cat /tmp/tmp.j1k20rSzKX
+ rm /tmp/tmp.f9j9xfHx58 /tmp/tmp.j1k20rSzKX
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.eLVBvdNxVP
+ cat /tmp/tmp.gPywcbaxJw
+ rm /tmp/tmp.eLVBvdNxVP /tmp/tmp.gPywcbaxJw
+ return 0
+ kubectl_bin wait --for=delete namespace replset-remapping-sharded-30354
++ mktemp
+ local LAST_OUT=/tmp/tmp.kJV3DEB7kF
++ mktemp
+ local LAST_ERR=/tmp/tmp.KDlulXOOGg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace replset-remapping-sharded-30354
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.kJV3DEB7kF
+ cat /tmp/tmp.KDlulXOOGg
+ rm /tmp/tmp.kJV3DEB7kF /tmp/tmp.KDlulXOOGg
+ return 0
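Editorial aside: the create_namespace helper above first sweeps leftover test namespaces, shielding system namespaces with an inverted grep before piping the survivors to kubectl delete. The pipeline from the trace, written out on its own (spacing normalized; the real helper also runs the get through its retry wrapper):

    kubectl get ns \
        | grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns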
+ desc 'create namespace replset-remapping-sharded-30354'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace replset-remapping-sharded-30354
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace replset-remapping-sharded-30354
++ mktemp
+ local LAST_OUT=/tmp/tmp.8YSpWNCybL
++ mktemp
+ local LAST_ERR=/tmp/tmp.86Z2Lf7R8J
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace replset-remapping-sharded-30354
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8YSpWNCybL
namespace/replset-remapping-sharded-30354 created
+ cat /tmp/tmp.86Z2Lf7R8J
+ rm /tmp/tmp.8YSpWNCybL /tmp/tmp.86Z2Lf7R8J
+ return 0
+ set_kube_ctx replset-remapping-sharded-30354
+ local namespace=replset-remapping-sharded-30354
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ZJWUrGtt5X
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Vbg5KPvlcG
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ZJWUrGtt5X
++ cat /tmp/tmp.Vbg5KPvlcG
++ rm /tmp/tmp.ZJWUrGtt5X /tmp/tmp.Vbg5KPvlcG
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2225-9afc535d-10-cluster8 --namespace=replset-remapping-sharded-30354
++ mktemp
+ local LAST_OUT=/tmp/tmp.SZO9wZQ8qt
++ mktemp
+ local LAST_ERR=/tmp/tmp.DphsC2aZZj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2225-9afc535d-10-cluster8 --namespace=replset-remapping-sharded-30354
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SZO9wZQ8qt
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2225-9afc535d-10-cluster8" modified.
+ cat /tmp/tmp.DphsC2aZZj
+ rm /tmp/tmp.SZO9wZQ8qt /tmp/tmp.DphsC2aZZj
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/secrets_with_tls.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/minio-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.xZgjrPNz3x
++ mktemp
+ local LAST_ERR=/tmp/tmp.MtmKOJu6is
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/secrets_with_tls.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/minio-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.xZgjrPNz3x
deployment.apps/psmdb-client created
secret/some-users created
secret/some-name-ssl created
secret/some-name-ssl-internal created
secret/minio-secret created
+ cat /tmp/tmp.MtmKOJu6is
+ rm /tmp/tmp.xZgjrPNz3x /tmp/tmp.MtmKOJu6is
+ return 0
+ deploy_minio
+ local cert_secret=
+ local service_name=minio-service
+ desc 'install MinIO: minio-service'
+ set +o xtrace
-----------------------------------------------------------------------------------
install MinIO: minio-service
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
+ :
+ helm repo remove minio
"minio" has been removed from your repositories
+ helm repo add minio https://charts.min.io/
"minio" has been added to your repositories
+ local endpoint=http://minio-service:9000
+ minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa')
+ local minio_args
+ [[ -n '' ]]
+ retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio
NAME: minio-service
LAST DEPLOYED: Tue Feb 10 13:12:24 2026
NAMESPACE: replset-remapping-sharded-30354
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.replset-remapping-sharded-30354.cluster.local

To access MinIO from localhost, run the below commands:

1. export POD_NAME=$(kubectl get pods --namespace replset-remapping-sharded-30354 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
2. kubectl port-forward $POD_NAME 9000 --namespace replset-remapping-sharded-30354

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:

1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart

2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace replset-remapping-sharded-30354 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace replset-remapping-sharded-30354 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000

3. mc ls minio-service-local
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.BgmRWCkuRq
+++ mktemp
++ local LAST_ERR=/tmp/tmp.jW33M8Mfu0
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.BgmRWCkuRq
++ cat /tmp/tmp.jW33M8Mfu0
++ rm /tmp/tmp.BgmRWCkuRq /tmp/tmp.jW33M8Mfu0
++ return 0
+ local MINIO_POD=minio-service-6d5f646cdc-nrtds
+ wait_pod minio-service-6d5f646cdc-nrtds
+ local pod=minio-service-6d5f646cdc-nrtds
+ set +o xtrace
waiting for pod/minio-service-6d5f646cdc-nrtds to be ready.OK
+ '[' -n psmdb-operator ']'
+ kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.replset-remapping-sharded-30354.svc.cluster.local --tcp=9000
service/minio-service created
+ create_minio_bucket operator-testing http://minio-service:9000
+ local bucket=operator-testing
+ local endpoint=http://minio-service:9000
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
++ mktemp
+ local LAST_OUT=/tmp/tmp.bgXRDWm9xH
++ mktemp
+ local LAST_ERR=/tmp/tmp.eBovKLyTDV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bgXRDWm9xH
make_bucket: operator-testing
pod "aws-cli" deleted from replset-remapping-sharded-30354 namespace
+ cat /tmp/tmp.eBovKLyTDV
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter.
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_replset-remapping-sharded-30354
+ rm /tmp/tmp.bgXRDWm9xH /tmp/tmp.eBovKLyTDV
+ return 0
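Editorial aside: the operator-testing bucket is created by a throwaway aws-cli pod pointed at the in-cluster MinIO endpoint. The same one-liner with mb swapped for ls is a convenient hand-check that the bucket exists, and later that backup artifacts landed in it; a sketch reusing the credentials and endpoint from this log, not part of the test itself:

    kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
        bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
            AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl \
            --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing'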
+ setup_source_cluster
+ desc 'setting up source cluster: some-name-source'
+ set +o xtrace
-----------------------------------------------------------------------------------
setting up source cluster: some-name-source
-----------------------------------------------------------------------------------
+ log 'creating PSMDB cluster: some-name-source'
+ set +o xtrace
[2026-02-10T13:12:59+0000] creating PSMDB cluster: some-name-source
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/some-name-source.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/some-name-source.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/some-name-source.yml
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2225-9afc535d"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"'
++ mktemp
+ /usr/sbin/sed -e s/NAME_SPACE/replset-remapping-sharded-30354/g
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ local LAST_OUT=/tmp/tmp.8nAMVIsIYF
++ mktemp
+ local LAST_ERR=/tmp/tmp.TnZeglNHt2
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8nAMVIsIYF
perconaservermongodb.psmdb.percona.com/some-name-source created
+ cat /tmp/tmp.TnZeglNHt2
+ rm /tmp/tmp.8nAMVIsIYF /tmp/tmp.TnZeglNHt2
+ return 0
+ wait_for_running some-name-source-cfg 3
+ local name=some-name-source-cfg
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name-source
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-source-cfg-0
+ local pod=some-name-source-cfg-0
+ set +o xtrace
waiting for pod/some-name-source-cfg-0 to be ready.......OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-source-cfg-1
+ local pod=some-name-source-cfg-1
+ set +o xtrace
waiting for pod/some-name-source-cfg-1 to be ready......OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.59fXlFMuye
+++ mktemp
++ local LAST_ERR=/tmp/tmp.EtMWwF53xP
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.59fXlFMuye
++ cat /tmp/tmp.EtMWwF53xP
++ rm /tmp/tmp.59fXlFMuye /tmp/tmp.EtMWwF53xP
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-source-cfg-2
+ local pod=some-name-source-cfg-2
+ set +o xtrace
waiting for pod/some-name-source-cfg-2 to be ready......OK
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.phINrqSi08
+++ mktemp
++ local LAST_ERR=/tmp/tmp.8QUJIv4JLh
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.phINrqSi08
++ cat /tmp/tmp.8QUJIv4JLh
++ rm /tmp/tmp.phINrqSi08 /tmp/tmp.8QUJIv4JLh
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.OuZvBVORx8
+++ mktemp
++ local LAST_ERR=/tmp/tmp.wE7XTuoXjD
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.OuZvBVORx8
++ cat /tmp/tmp.wE7XTuoXjD
++ rm /tmp/tmp.OuZvBVORx8 /tmp/tmp.wE7XTuoXjD
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness..............................................................
+ wait_for_running some-name-source-rs0 3
+ local name=some-name-source-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name-source
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-source-rs0-0
+ local pod=some-name-source-rs0-0
+ set +o xtrace
waiting for pod/some-name-source-rs0-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-source-rs0-1
+ local pod=some-name-source-rs0-1
+ set +o xtrace
waiting for pod/some-name-source-rs0-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Mka6THaMZC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.DPd4vyaOVE
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Mka6THaMZC
++ cat /tmp/tmp.DPd4vyaOVE
++ rm /tmp/tmp.Mka6THaMZC /tmp/tmp.DPd4vyaOVE
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-source-rs0-2
+ local pod=some-name-source-rs0-2
+ set +o xtrace
waiting for pod/some-name-source-rs0-2 to be ready.OK
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.E083f3mtHN
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HmPvIgUSbP
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.E083f3mtHN
++ cat /tmp/tmp.HmPvIgUSbP
++ rm /tmp/tmp.E083f3mtHN /tmp/tmp.HmPvIgUSbP
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HbiOptvT9a
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PGyi5h1oq7
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.HbiOptvT9a
++ cat /tmp/tmp.PGyi5h1oq7
++ rm /tmp/tmp.HbiOptvT9a /tmp/tmp.PGyi5h1oq7
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ wait_for_running some-name-source-rs1 3
+ local name=some-name-source-rs1
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs1
+ local cluster_name=some-name-source
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-source-rs1-0
+ local pod=some-name-source-rs1-0
+ set +o xtrace
waiting for pod/some-name-source-rs1-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-source-rs1-1
+ local pod=some-name-source-rs1-1
+ set +o xtrace
waiting for pod/some-name-source-rs1-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.5OAWT4nL2w
+++ mktemp
++ local LAST_ERR=/tmp/tmp.NBfUjcTiVi
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.5OAWT4nL2w
++ cat /tmp/tmp.NBfUjcTiVi
++ rm /tmp/tmp.5OAWT4nL2w /tmp/tmp.NBfUjcTiVi
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-source-rs1-2
+ local pod=some-name-source-rs1-2
+ set +o xtrace
waiting for pod/some-name-source-rs1-2 to be ready.OK
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.G8QExEoUH7
+++ mktemp
++ local LAST_ERR=/tmp/tmp.FJDqJcov9W
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.G8QExEoUH7
++ cat /tmp/tmp.FJDqJcov9W
++ rm /tmp/tmp.G8QExEoUH7 /tmp/tmp.FJDqJcov9W
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.zpMoGz983J
+++ mktemp
++ local LAST_ERR=/tmp/tmp.TNMVUF1zwm
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs1")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.zpMoGz983J
++ cat /tmp/tmp.TNMVUF1zwm
++ rm /tmp/tmp.zpMoGz983J /tmp/tmp.TNMVUF1zwm
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ wait_for_running some-name-source-mongos 3
+ local name=some-name-source-mongos
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=mongos
+ local cluster_name=some-name-source
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-source-mongos-0
+ local pod=some-name-source-mongos-0
+ set +o xtrace
waiting for pod/some-name-source-mongos-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-source-mongos-1
+ local pod=some-name-source-mongos-1
+ set +o xtrace
waiting for pod/some-name-source-mongos-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.KLdJ2bMrIK
+++ mktemp
++ local LAST_ERR=/tmp/tmp.yjwuPXcWfF
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.KLdJ2bMrIK
++ cat /tmp/tmp.yjwuPXcWfF
++ rm /tmp/tmp.KLdJ2bMrIK /tmp/tmp.yjwuPXcWfF
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-source-mongos-2
+ local pod=some-name-source-mongos-2
+ set +o xtrace
waiting for pod/some-name-source-mongos-2 to be ready.OK
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.bEMBG72SqQ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.7sjTvDdp20
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.bEMBG72SqQ
++ cat /tmp/tmp.7sjTvDdp20
++ rm /tmp/tmp.bEMBG72SqQ /tmp/tmp.7sjTvDdp20
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.kTp5PUzY20
+++ mktemp
++ local LAST_ERR=/tmp/tmp.gzKZg2Glyg
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.kTp5PUzY20
++ cat /tmp/tmp.gzKZg2Glyg
++ rm /tmp/tmp.kTp5PUzY20 /tmp/tmp.gzKZg2Glyg
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ wait_cluster_consistency some-name-source
+ local cluster_name=some-name-source
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.status.state}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Daylvto7jx
+++ mktemp
++ local LAST_ERR=/tmp/tmp.5miXGhTXaW
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name-source -o 'jsonpath={.status.state}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Daylvto7jx
++ cat /tmp/tmp.5miXGhTXaW
++ rm /tmp/tmp.Daylvto7jx /tmp/tmp.5miXGhTXaW
++ return 0
+ [[ ready == \r\e\a\d\y ]]
+ echo .OK
.OK
+ log 'enable sharding'
+ set +o xtrace
[2026-02-10T13:17:29+0000] enable sharding
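Editorial aside: run_mongos, used for the next several steps, resolves the psmdb-client pod and pipes mongo shell input to it over a mongos connection string. Stripped of the retry and mktemp scaffolding, the pattern from the trace is roughly:

    # simplified from the trace; run_mongos itself adds retries and output capture
    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c \
        'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin'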
+ run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354
+ local 'command=sh.enableSharding("myApp")'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.wesVojwyWU
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SI5CyGF4Dd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.wesVojwyWU
++ cat /tmp/tmp.SI5CyGF4Dd
++ rm /tmp/tmp.wesVojwyWU /tmp/tmp.SI5CyGF4Dd
++ return 0
+ local client_container=psmdb-client-86cb5d8484-rpcfb
+ kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.NICLh3s9MX
++ mktemp
+ local LAST_ERR=/tmp/tmp.mgFWSHlIs9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.NICLh3s9MX
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("095a6981-c2dc-4f27-bfcf-8a45e7b4983f") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1770729451, 10), "signature" : { "hash" : BinData(0,"veq31aR9WzJFTTDi8BsLdZIhmAc="), "keyId" : NumberLong("7605224283245117445") } }, "operationTime" : Timestamp(1770729451, 7) }
bye
+ cat /tmp/tmp.mgFWSHlIs9
+ rm /tmp/tmp.NICLh3s9MX /tmp/tmp.mgFWSHlIs9
+ return 0
+ sleep 2
+ log 'write some data'
+ set +o xtrace
[2026-02-10T13:17:33+0000] write some data
+ run_mongos 'use myApp\n db.test.insert({ x: 100500 })' databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.z8x5CdPr75
+++ mktemp
++ local LAST_ERR=/tmp/tmp.d0bgz96vTb
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.z8x5CdPr75
++ cat /tmp/tmp.d0bgz96vTb
++ rm /tmp/tmp.z8x5CdPr75 /tmp/tmp.d0bgz96vTb
++ return 0
+ local client_container=psmdb-client-86cb5d8484-rpcfb
+ kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.2hf2ozWWFX
++ mktemp
+ local LAST_ERR=/tmp/tmp.oUaPjjdgr4
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2hf2ozWWFX
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("e7a5f065-2398-42f8-a452-6e9205d1f2fe") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.oUaPjjdgr4
+ rm /tmp/tmp.2hf2ozWWFX /tmp/tmp.oUaPjjdgr4
+ return 0
+ log 'shard collection'
+ set +o xtrace
[2026-02-10T13:17:35+0000] shard collection
+ run_mongos 'sh.shardCollection("myApp.test", { _id: 1 } )' clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354
+ local 'command=sh.shardCollection("myApp.test", { _id: 1 } )'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.NrM2VPGbDX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.VkQdF5pFiX
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.NrM2VPGbDX
++ cat /tmp/tmp.VkQdF5pFiX
++ rm /tmp/tmp.NrM2VPGbDX /tmp/tmp.VkQdF5pFiX
++ return 0
+ local client_container=psmdb-client-86cb5d8484-rpcfb
+ kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''sh.shardCollection("myApp.test", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.a8FE3FaoeM
++ mktemp
+ local LAST_ERR=/tmp/tmp.4iAzQd4wg7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''sh.shardCollection("myApp.test", { _id: 1 } )\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.a8FE3FaoeM
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("6f5ede7e-2774-4040-9edd-e62790ce30fb") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
{ "collectionsharded" : "myApp.test", "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1770729458, 16), "signature" : { "hash" : BinData(0,"LGYwsA97Iq5EJqjR5jh+/71Oj7k="), "keyId" : NumberLong("7605224283245117445") } }, "operationTime" : Timestamp(1770729458, 15) }
bye
+ cat /tmp/tmp.4iAzQd4wg7
+ rm /tmp/tmp.a8FE3FaoeM /tmp/tmp.4iAzQd4wg7
+ return 0
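Editorial aside: with myApp.test sharded on { _id: 1 }, placement can be double-checked from the same client pod. This is not something the test does, just a manual verification one could run (sh.status() and getShardDistribution() are standard mongo shell helpers):

    kubectl exec "$client" -- bash -c \
        'printf '\''sh.status()\nuse myApp\ndb.test.getShardDistribution()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin'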
"signature" : { "hash" : BinData(0,"LGYwsA97Iq5EJqjR5jh+/71Oj7k="), "keyId" : NumberLong("7605224283245117445") } }, "operationTime" : Timestamp(1770729458, 15) } bye + cat /tmp/tmp.4iAzQd4wg7 + rm /tmp/tmp.a8FE3FaoeM /tmp/tmp.4iAzQd4wg7 + return 0 + compare_mongos_cmd find databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354 + local command=find + local uri=databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-02-10T13:17:38+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cjoDmeRkqM +++ mktemp ++ local LAST_ERR=/tmp/tmp.A5Bx1FhVIW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cjoDmeRkqM ++ cat /tmp/tmp.A5Bx1FhVIW ++ rm /tmp/tmp.cjoDmeRkqM /tmp/tmp.A5Bx1FhVIW ++ return 0 + local client_container=psmdb-client-86cb5d8484-rpcfb + kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.m404RieDyK ++ mktemp + local LAST_ERR=/tmp/tmp.iehlq5F5mA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-source-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m404RieDyK + cat /tmp/tmp.iehlq5F5mA + rm /tmp/tmp.m404RieDyK /tmp/tmp.iehlq5F5mA + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/compare/find.json /tmp/tmp.JGma3ixp9C/find + run_backup minio backup-minio-logical + local storage=minio + local backup_name=backup-minio-logical + local type=logical + log 'running backup backup-minio-logical' + set +o xtrace [2026-02-10T13:17:40+0000] running backup backup-minio-logical + yq eval '.metadata.name = "backup-minio-logical" | .spec.storageName = "minio" | .spec.type 
= "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GWz8E3azZM ++ mktemp + local LAST_ERR=/tmp/tmp.crLFAEobeP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GWz8E3azZM perconaservermongodbbackup.psmdb.percona.com/backup-minio-logical created + cat /tmp/tmp.crLFAEobeP + rm /tmp/tmp.GWz8E3azZM /tmp/tmp.crLFAEobeP + return 0 + wait_backup backup-minio-logical + local backup_name=backup-minio-logical + local target_state=ready + set +o xtrace waiting for backup-minio-logical to reach ready state..................OK + run_backup minio backup-minio-physical + local storage=minio + local backup_name=backup-minio-physical + local type=logical + log 'running backup backup-minio-physical' + set +o xtrace [2026-02-10T13:18:16+0000] running backup backup-minio-physical + yq eval '.metadata.name = "backup-minio-physical" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.HSftF7Ivuy ++ mktemp + local LAST_ERR=/tmp/tmp.zbXfcveZfq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HSftF7Ivuy perconaservermongodbbackup.psmdb.percona.com/backup-minio-physical created + cat /tmp/tmp.zbXfcveZfq + rm /tmp/tmp.HSftF7Ivuy /tmp/tmp.zbXfcveZfq + return 0 + wait_backup backup-minio-physical + local backup_name=backup-minio-physical + local target_state=ready + set +o xtrace waiting for backup-minio-physical to reach ready state..................OK + log 'deleting PSMDB cluster: some-name-source' + set +o xtrace [2026-02-10T13:18:52+0000] deleting PSMDB cluster: some-name-source + kubectl_bin delete psmdb some-name-source ++ mktemp + local LAST_OUT=/tmp/tmp.I35pAB21Ll ++ mktemp + local LAST_ERR=/tmp/tmp.Wpy1ToGXoM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb some-name-source + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I35pAB21Ll perconaservermongodb.psmdb.percona.com "some-name-source" deleted from replset-remapping-sharded-30354 namespace + cat /tmp/tmp.Wpy1ToGXoM + rm /tmp/tmp.I35pAB21Ll /tmp/tmp.Wpy1ToGXoM + return 0 + wait_for_delete psmdb/some-name-source + local res=psmdb/some-name-source + local wait_time=60 + set +o xtrace waiting for psmdb/some-name-source to be deletedError from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name-source" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name-source" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name-source" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name-source" not found + wait_for_delete pod/some-name-source-rs0-0 + local res=pod/some-name-source-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/some-name-source-rs0-0 to be deletedError from server (NotFound): pods "some-name-source-rs0-0" not found Error from server (NotFound): pods "some-name-source-rs0-0" not found Error from server (NotFound): pods "some-name-source-rs0-0" not found Error from server (NotFound): 
pods "some-name-source-rs0-0" not found + setup_target_cluster + desc 'setting up target cluster: some-name-target' + set +o xtrace ----------------------------------------------------------------------------------- setting up target cluster: some-name-target ----------------------------------------------------------------------------------- + log 'creating PSMDB cluster: some-name-target' + set +o xtrace [2026-02-10T13:19:26+0000] creating PSMDB cluster: some-name-target + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/secrets_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qj1oSUUniF ++ mktemp + local LAST_ERR=/tmp/tmp.luoK3NrNzO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/conf/secrets_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qj1oSUUniF secret/some-users configured secret/some-name-ssl unchanged secret/some-name-ssl-internal unchanged + cat /tmp/tmp.luoK3NrNzO Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.qj1oSUUniF /tmp/tmp.luoK3NrNzO + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/some-name-target.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/some-name-target.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/some-name-target.yml + kubectl_bin apply -f - + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2225-9afc535d"' + /usr/sbin/sed -e s/NAME_SPACE/replset-remapping-sharded-30354/g + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.dVMXpBNPI7 ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.e0Y123dd9s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dVMXpBNPI7 perconaservermongodb.psmdb.percona.com/some-name-target created + cat /tmp/tmp.e0Y123dd9s + rm /tmp/tmp.dVMXpBNPI7 /tmp/tmp.e0Y123dd9s + return 0 + wait_for_running some-name-target-cfg 3 + local name=some-name-target-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name-target ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-target-cfg-0 + local pod=some-name-target-cfg-0 + set +o xtrace waiting for pod/some-name-target-cfg-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-target-cfg-1 + local pod=some-name-target-cfg-1 + set +o xtrace waiting for pod/some-name-target-cfg-1 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get 
psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OT9ibOM7Ls +++ mktemp ++ local LAST_ERR=/tmp/tmp.RZe2w7FGGO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OT9ibOM7Ls ++ cat /tmp/tmp.RZe2w7FGGO ++ rm /tmp/tmp.OT9ibOM7Ls /tmp/tmp.RZe2w7FGGO ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-target-cfg-2 + local pod=some-name-target-cfg-2 + set +o xtrace waiting for pod/some-name-target-cfg-2 to be ready.......OK ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rMxHE5pGRP +++ mktemp ++ local LAST_ERR=/tmp/tmp.d3bZ3vlUrW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rMxHE5pGRP ++ cat /tmp/tmp.d3bZ3vlUrW ++ rm /tmp/tmp.rMxHE5pGRP /tmp/tmp.d3bZ3vlUrW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2hBl8tnm2s +++ mktemp ++ local LAST_ERR=/tmp/tmp.hoC67zSxvg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2hBl8tnm2s ++ cat /tmp/tmp.hoC67zSxvg ++ rm /tmp/tmp.2hBl8tnm2s /tmp/tmp.hoC67zSxvg ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................................................... 
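The readiness routine traced above repeats for every replset: each pod is waited on individually, the optional arbiter, nonvoting, and hidden flags are read back from the PerconaServerMongoDB spec via JSONPath, and the harness then polls the CR's .status.state until it reports "ready". A minimal standalone sketch of the same checks, assuming the cluster name and pod naming from this run and substituting kubectl wait for the harness's own wait_pod polling helper:

# Readiness sketch: wait for each replset pod, read the optional per-replset
# flags back from the CR spec, then poll the CR status until it is "ready".
cluster=some-name-target
rs=cfg
for i in 0 1 2; do
    kubectl wait --for=condition=Ready "pod/${cluster}-${rs}-${i}" --timeout=300s
done
for flag in arbiter nonvoting hidden; do
    kubectl get psmdb "${cluster}" -o "jsonpath={.spec.replsets[?(@.name==\"${rs}\")].${flag}.enabled}"
done
until [[ $(kubectl get psmdb "${cluster}" -o 'jsonpath={.status.state}') == "ready" ]]; do
    sleep 10
done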
+ wait_for_running some-name-target-replset0 3 + local name=some-name-target-replset0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=replset0 + local cluster_name=some-name-target ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-target-replset0-0 + local pod=some-name-target-replset0-0 + set +o xtrace waiting for pod/some-name-target-replset0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-target-replset0-1 + local pod=some-name-target-replset0-1 + set +o xtrace waiting for pod/some-name-target-replset0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GzqqGt39kR +++ mktemp ++ local LAST_ERR=/tmp/tmp.4PKlBy4guh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GzqqGt39kR ++ cat /tmp/tmp.4PKlBy4guh ++ rm /tmp/tmp.GzqqGt39kR /tmp/tmp.4PKlBy4guh ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-target-replset0-2 + local pod=some-name-target-replset0-2 + set +o xtrace waiting for pod/some-name-target-replset0-2 to be ready.OK ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nwBtJXZP56 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XBiUS2bHDW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nwBtJXZP56 ++ cat /tmp/tmp.XBiUS2bHDW ++ rm /tmp/tmp.nwBtJXZP56 /tmp/tmp.XBiUS2bHDW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LVEbsESraL +++ mktemp ++ local LAST_ERR=/tmp/tmp.zP5SFDdplO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LVEbsESraL ++ cat /tmp/tmp.zP5SFDdplO ++ rm /tmp/tmp.LVEbsESraL /tmp/tmp.zP5SFDdplO ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-target-replset1 3 + local name=some-name-target-replset1 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=replset1 + local cluster_name=some-name-target ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-target-replset1-0 + local pod=some-name-target-replset1-0 + set +o xtrace waiting for pod/some-name-target-replset1-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-target-replset1-1 + local pod=some-name-target-replset1-1 + set +o xtrace waiting for pod/some-name-target-replset1-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name-target -o 
'jsonpath={.spec.replsets[?(@.name=="replset1")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aheg3Rb4cN +++ mktemp ++ local LAST_ERR=/tmp/tmp.MvCVL73Hwe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset1")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aheg3Rb4cN ++ cat /tmp/tmp.MvCVL73Hwe ++ rm /tmp/tmp.aheg3Rb4cN /tmp/tmp.MvCVL73Hwe ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-target-replset1-2 + local pod=some-name-target-replset1-2 + set +o xtrace waiting for pod/some-name-target-replset1-2 to be ready.OK ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset1")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1805gTTBd6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uRBzxBKjg5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset1")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1805gTTBd6 ++ cat /tmp/tmp.uRBzxBKjg5 ++ rm /tmp/tmp.1805gTTBd6 /tmp/tmp.uRBzxBKjg5 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset1")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X9L5Otmmey +++ mktemp ++ local LAST_ERR=/tmp/tmp.P7NHU4l4VF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="replset1")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X9L5Otmmey ++ cat /tmp/tmp.P7NHU4l4VF ++ rm /tmp/tmp.X9L5Otmmey /tmp/tmp.P7NHU4l4VF ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-target-mongos 3 + local name=some-name-target-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name-target ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-target-mongos-0 + local pod=some-name-target-mongos-0 + set +o xtrace waiting for pod/some-name-target-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-target-mongos-1 + local pod=some-name-target-mongos-1 + set +o xtrace waiting for pod/some-name-target-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rJ0LWSqt3n +++ mktemp ++ local LAST_ERR=/tmp/tmp.CKiug6DvVf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rJ0LWSqt3n ++ cat /tmp/tmp.CKiug6DvVf ++ rm /tmp/tmp.rJ0LWSqt3n /tmp/tmp.CKiug6DvVf ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-target-mongos-2 + local pod=some-name-target-mongos-2 + set +o xtrace waiting for pod/some-name-target-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp 
++ local LAST_OUT=/tmp/tmp.TUghjO2kIo +++ mktemp ++ local LAST_ERR=/tmp/tmp.WVikOkWCYp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TUghjO2kIo ++ cat /tmp/tmp.WVikOkWCYp ++ rm /tmp/tmp.TUghjO2kIo /tmp/tmp.WVikOkWCYp ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pYqhtp9J1V +++ mktemp ++ local LAST_ERR=/tmp/tmp.zIGJXNXOpD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pYqhtp9J1V ++ cat /tmp/tmp.zIGJXNXOpD ++ rm /tmp/tmp.pYqhtp9J1V /tmp/tmp.zIGJXNXOpD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LlmX3d5f9T +++ mktemp ++ local LAST_ERR=/tmp/tmp.cd12a7vjya ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LlmX3d5f9T ++ cat /tmp/tmp.cd12a7vjya ++ rm /tmp/tmp.LlmX3d5f9T /tmp/tmp.cd12a7vjya ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + test_restores + desc 'CASE 1: Logical restore with replset remapping' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical restore with replset remapping ----------------------------------------------------------------------------------- + run_restore backup-minio-logical + local backup_name=backup-minio-logical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/restore.yml + log 'running restore restore-backup-minio-logical' + set +o xtrace [2026-02-10T13:23:59+0000] running restore restore-backup-minio-logical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-logical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-logical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RfiXmrFz4a ++ mktemp + local LAST_ERR=/tmp/tmp.JDARmDWw9g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RfiXmrFz4a perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-logical created + cat /tmp/tmp.JDARmDWw9g + rm /tmp/tmp.RfiXmrFz4a /tmp/tmp.JDARmDWw9g + return 0 + wait_restore backup-minio-logical some-name-target + local backup_name=backup-minio-logical + local cluster_name=some-name-target + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the 
psmdb-restore/restore-backup-minio-logical object to be created.OK Waiting psmdb-restore/restore-backup-minio-logical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.75bVCMqjcG +++ mktemp ++ local LAST_ERR=/tmp/tmp.WTrP9sd69Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.75bVCMqjcG ++ cat /tmp/tmp.WTrP9sd69Q ++ rm /tmp/tmp.75bVCMqjcG /tmp/tmp.WTrP9sd69Q ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lnKDrmdHMG +++ mktemp ++ local LAST_ERR=/tmp/tmp.IVm7jmgHCQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lnKDrmdHMG ++ cat /tmp/tmp.IVm7jmgHCQ ++ rm /tmp/tmp.lnKDrmdHMG /tmp/tmp.IVm7jmgHCQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c2uvJ7aUQC +++ mktemp ++ local LAST_ERR=/tmp/tmp.FEP79hwBZT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c2uvJ7aUQC ++ cat /tmp/tmp.FEP79hwBZT ++ rm /tmp/tmp.c2uvJ7aUQC /tmp/tmp.FEP79hwBZT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xc6s0PTtNJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.8kP3TmznUY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xc6s0PTtNJ ++ cat /tmp/tmp.8kP3TmznUY ++ rm /tmp/tmp.Xc6s0PTtNJ /tmp/tmp.8kP3TmznUY ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2FrksXz9TV +++ mktemp ++ local LAST_ERR=/tmp/tmp.A4kA7uXIUn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2FrksXz9TV ++ cat /tmp/tmp.A4kA7uXIUn ++ rm /tmp/tmp.2FrksXz9TV /tmp/tmp.A4kA7uXIUn ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OnEboKUeja +++ mktemp ++ local LAST_ERR=/tmp/tmp.OblJhJ7epz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OnEboKUeja ++ cat /tmp/tmp.OblJhJ7epz ++ rm /tmp/tmp.OnEboKUeja /tmp/tmp.OblJhJ7epz ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongos_cmd find databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local command=find + local uri=databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-02-10T13:25:34+0000] running db.test.command() in myApp + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongos 'use myApp\n db.test.find()' databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0wp4n3cBN2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RmrxNjUQkX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0wp4n3cBN2 ++ cat /tmp/tmp.RmrxNjUQkX ++ rm /tmp/tmp.0wp4n3cBN2 /tmp/tmp.RmrxNjUQkX ++ return 0 + local client_container=psmdb-client-86cb5d8484-rpcfb + kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.Ybogh0lxU1 ++ mktemp + local LAST_ERR=/tmp/tmp.2CzbHkspBF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ybogh0lxU1 + cat /tmp/tmp.2CzbHkspBF + rm /tmp/tmp.Ybogh0lxU1 /tmp/tmp.2CzbHkspBF + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/compare/find.json /tmp/tmp.JGma3ixp9C/find + desc 'CASE 1: PASSED' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: PASSED ----------------------------------------------------------------------------------- + log 'dropping test collection' + set +o xtrace [2026-02-10T13:25:36+0000] dropping test collection + run_mongos 'use myApp\n db.test.drop()' databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local 'command=use myApp\n db.test.drop()' + local uri=databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m3kOJR7ke5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ff21QfL2rD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m3kOJR7ke5 ++ cat /tmp/tmp.Ff21QfL2rD ++ rm /tmp/tmp.m3kOJR7ke5 /tmp/tmp.Ff21QfL2rD ++ return 0 + local client_container=psmdb-client-86cb5d8484-rpcfb + kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.nZ7bo5hBnU ++ mktemp + local LAST_ERR=/tmp/tmp.IY5ZkenQLq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nZ7bo5hBnU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("48b87f55-4a04-4c96-b3f8-2e26955ef6e0") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.IY5ZkenQLq + rm /tmp/tmp.nZ7bo5hBnU /tmp/tmp.IY5ZkenQLq + return 0 + desc 'CASE 2: Physical restore with replset remapping' + set +o xtrace 
----------------------------------------------------------------------------------- CASE 2: Physical restore with replset remapping ----------------------------------------------------------------------------------- + run_restore backup-minio-physical + local backup_name=backup-minio-physical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/restore.yml + log 'running restore restore-backup-minio-physical' + set +o xtrace [2026-02-10T13:25:38+0000] running restore restore-backup-minio-physical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-physical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CYhuq5VNCo ++ mktemp + local LAST_ERR=/tmp/tmp.tpWdjLGKXk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CYhuq5VNCo perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-physical created + cat /tmp/tmp.tpWdjLGKXk + rm /tmp/tmp.CYhuq5VNCo /tmp/tmp.tpWdjLGKXk + return 0 + wait_restore backup-minio-physical some-name-target + local backup_name=backup-minio-physical + local cluster_name=some-name-target + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-physical object to be created.OK Waiting psmdb-restore/restore-backup-minio-physical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cOWfEe02VT +++ mktemp ++ local LAST_ERR=/tmp/tmp.EZRNGLprtV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cOWfEe02VT ++ cat /tmp/tmp.EZRNGLprtV ++ rm /tmp/tmp.cOWfEe02VT /tmp/tmp.EZRNGLprtV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AFxIJmWACr +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gnw6OCFSsu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AFxIJmWACr ++ cat /tmp/tmp.Gnw6OCFSsu ++ rm /tmp/tmp.AFxIJmWACr /tmp/tmp.Gnw6OCFSsu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KzepOkeaZH +++ mktemp ++ local LAST_ERR=/tmp/tmp.UIcrjrT3lB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KzepOkeaZH ++ cat /tmp/tmp.UIcrjrT3lB ++ rm /tmp/tmp.KzepOkeaZH /tmp/tmp.UIcrjrT3lB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U7rsw9tFZa +++ mktemp ++ local LAST_ERR=/tmp/tmp.M68BhizDdw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U7rsw9tFZa ++ cat /tmp/tmp.M68BhizDdw ++ rm /tmp/tmp.U7rsw9tFZa /tmp/tmp.M68BhizDdw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vOlQsSISBd +++ mktemp ++ local LAST_ERR=/tmp/tmp.2vntJiSTTm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vOlQsSISBd ++ cat /tmp/tmp.2vntJiSTTm ++ rm /tmp/tmp.vOlQsSISBd /tmp/tmp.2vntJiSTTm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.08MdWIpwJy +++ mktemp ++ local LAST_ERR=/tmp/tmp.NcTdbx3GKL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.08MdWIpwJy ++ cat /tmp/tmp.NcTdbx3GKL ++ rm /tmp/tmp.08MdWIpwJy /tmp/tmp.NcTdbx3GKL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKDGKu4hIR +++ mktemp ++ local LAST_ERR=/tmp/tmp.BmO9Dussey ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eKDGKu4hIR ++ cat /tmp/tmp.BmO9Dussey ++ rm /tmp/tmp.eKDGKu4hIR /tmp/tmp.BmO9Dussey ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongos_cmd find databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local command=find + local uri=databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-02-10T13:27:30+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a3IQpkvLPf +++ mktemp ++ local LAST_ERR=/tmp/tmp.JXX1PlPZjC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a3IQpkvLPf ++ cat /tmp/tmp.JXX1PlPZjC ++ rm /tmp/tmp.a3IQpkvLPf /tmp/tmp.JXX1PlPZjC ++ return 0 + local client_container=psmdb-client-86cb5d8484-rpcfb + kubectl_bin exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.w7MLkiQvyK ++ mktemp + local LAST_ERR=/tmp/tmp.21sH6oAPM0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-rpcfb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://databaseAdmin:databaseAdmin123456@some-name-target-mongos.replset-remapping-sharded-30354.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w7MLkiQvyK + cat /tmp/tmp.21sH6oAPM0 + rm /tmp/tmp.w7MLkiQvyK /tmp/tmp.21sH6oAPM0 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/e2e-tests/replset-remapping-sharded/compare/find.json /tmp/tmp.JGma3ixp9C/find + desc 'CASE 2: PASSED' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: PASSED ----------------------------------------------------------------------------------- + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + destroy replset-remapping-sharded-30354 + local namespace=replset-remapping-sharded-30354 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.YFhzmAgStz +++ mktemp ++ local LAST_ERR=/tmp/tmp.DF805TGrwY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YFhzmAgStz ++ cat /tmp/tmp.DF805TGrwY ++ rm /tmp/tmp.YFhzmAgStz /tmp/tmp.DF805TGrwY ++ return 0 + '[' 2 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.JDbR15vDKE ++ mktemp + local LAST_ERR=/tmp/tmp.zijO5kD1Ur + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JDbR15vDKE NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-minio-logical some-name-source minio s3://operator-testing/2026-02-10T13:17:43Z logical 121.39KB ready 9m23s 9m52s backup-minio-physical some-name-source minio s3://operator-testing/2026-02-10T13:18:18Z logical 507.08KB ready 8m46s 9m16s + cat /tmp/tmp.zijO5kD1Ur + rm /tmp/tmp.JDbR15vDKE /tmp/tmp.zijO5kD1Ur + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.StxbsGOTF2 ++ mktemp + local LAST_ERR=/tmp/tmp.yigMNjJflv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.StxbsGOTF2 perconaservermongodbbackup.psmdb.percona.com "backup-minio-logical" deleted from replset-remapping-sharded-30354 namespace perconaservermongodbbackup.psmdb.percona.com 
"backup-minio-physical" deleted from replset-remapping-sharded-30354 namespace + cat /tmp/tmp.yigMNjJflv + rm /tmp/tmp.StxbsGOTF2 /tmp/tmp.yigMNjJflv + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.4sRmfp4joF ++ mktemp + local LAST_ERR=/tmp/tmp.v995VQX3b2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4sRmfp4joF customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.v995VQX3b2 + rm /tmp/tmp.4sRmfp4joF /tmp/tmp.v995VQX3b2 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jx2CKUHe7A ++ mktemp + local LAST_ERR=/tmp/tmp.12cqgL3htV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jx2CKUHe7A + cat /tmp/tmp.12cqgL3htV + rm /tmp/tmp.jx2CKUHe7A /tmp/tmp.12cqgL3htV + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.q9S3DpilaB ++ mktemp + local LAST_ERR=/tmp/tmp.m2OPAkY9Qd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd 
perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q9S3DpilaB + cat /tmp/tmp.m2OPAkY9Qd + rm /tmp/tmp.q9S3DpilaB /tmp/tmp.m2OPAkY9Qd + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Nh8AcrGObz ++ mktemp + local LAST_ERR=/tmp/tmp.C00SgWrqdW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nh8AcrGObz + cat /tmp/tmp.C00SgWrqdW + rm /tmp/tmp.Nh8AcrGObz /tmp/tmp.C00SgWrqdW + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.P2sJJx3WAL ++ mktemp + local LAST_ERR=/tmp/tmp.JzvP11O3bG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2225/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P2sJJx3WAL clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.JzvP11O3bG + rm /tmp/tmp.P2sJJx3WAL /tmp/tmp.JzvP11O3bG + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.2rCWRAn7EE ++ mktemp + local LAST_ERR=/tmp/tmp.0HO9uc5yMu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.2rCWRAn7EE + cat /tmp/tmp.0HO9uc5yMu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.2rCWRAn7EE + cat /tmp/tmp.0HO9uc5yMu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.2rCWRAn7EE
+ cat /tmp/tmp.0HO9uc5yMu
+ sleep 8
+ cat /tmp/tmp.2rCWRAn7EE
+ cat /tmp/tmp.0HO9uc5yMu
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.2rCWRAn7EE /tmp/tmp.0HO9uc5yMu + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.JGma3ixp9C + kubectl_bin delete --grace-period=0 --force=true namespace replset-remapping-sharded-30354 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ttuBVijFcD + local LAST_OUT=/tmp/tmp.qZWvRTaOKN ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Atla55CHif + local LAST_ERR=/tmp/tmp.VmVD6Epeb2 + local exit_status=0 + local exit_status=0 + local timeout=4 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace replset-remapping-sharded-30354