Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/logs/custom-users-roles.log WARNING: version difference between client (1.31) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.31) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.31) and server (1.27) exceeds the supported minor version skew of +/-1 + psmdb=some-name + cluster=some-name-rs0 + create_infra custom-users-roles-21248 + local ns=custom-users-roles-21248 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.yO9LWdlTGt ++ mktemp + local LAST_ERR=/tmp/tmp.WlTYZTYAW7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yO9LWdlTGt customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.WlTYZTYAW7 + rm /tmp/tmp.yO9LWdlTGt /tmp/tmp.WlTYZTYAW7 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.A6uXuZCos8 ++ mktemp + local LAST_ERR=/tmp/tmp.XYxEaUmT0n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A6uXuZCos8 + cat /tmp/tmp.XYxEaUmT0n + rm /tmp/tmp.A6uXuZCos8 /tmp/tmp.XYxEaUmT0n + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server 
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.YktTkrDufX ++ mktemp + local LAST_ERR=/tmp/tmp.nllXhJ4tar + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YktTkrDufX + cat /tmp/tmp.nllXhJ4tar + rm /tmp/tmp.YktTkrDufX /tmp/tmp.nllXhJ4tar + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.HalUzBCDNF ++ mktemp + local LAST_ERR=/tmp/tmp.mQRwhLVED9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HalUzBCDNF + cat /tmp/tmp.mQRwhLVED9 + rm /tmp/tmp.HalUzBCDNF /tmp/tmp.mQRwhLVED9 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.62vFKE7IkB ++ mktemp + local LAST_ERR=/tmp/tmp.7k9LLVaCFe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.62vFKE7IkB clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.7k9LLVaCFe + rm /tmp/tmp.62vFKE7IkB /tmp/tmp.7k9LLVaCFe + return 0 + check_crd_for_deletion PR-1639-7728af52 + local git_tag=PR-1639-7728af52 ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1639-7728af52/deploy/crd.yaml + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nHRnyDXT27 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vEAUQ2ojXC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.nHRnyDXT27 ++ cat /tmp/tmp.vEAUQ2ojXC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ 
for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.nHRnyDXT27 ++ cat /tmp/tmp.vEAUQ2ojXC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.nHRnyDXT27 ++ cat /tmp/tmp.vEAUQ2ojXC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.nHRnyDXT27 ++ cat /tmp/tmp.vEAUQ2ojXC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.nHRnyDXT27 /tmp/tmp.vEAUQ2ojXC ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.um4ZMAlhSD + local LAST_OUT=/tmp/tmp.h8mqAXVbA3 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.VoGVNWXvph + local exit_status=0 
+ local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.Oydg2uG6VB + local exit_status=0 + local timeout=4 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h8mqAXVbA3 + cat /tmp/tmp.Oydg2uG6VB + rm /tmp/tmp.h8mqAXVbA3 /tmp/tmp.Oydg2uG6VB + return 0 namespace "custom-users-roles-8075" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.um4ZMAlhSD namespace "psmdb-operator" deleted + cat /tmp/tmp.VoGVNWXvph + rm /tmp/tmp.um4ZMAlhSD /tmp/tmp.VoGVNWXvph + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.cyWIAzgs54 ++ mktemp + local LAST_ERR=/tmp/tmp.x5vTu1JJOp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cyWIAzgs54 + cat /tmp/tmp.x5vTu1JJOp + rm /tmp/tmp.cyWIAzgs54 /tmp/tmp.x5vTu1JJOp + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2fONHx03vM ++ mktemp + local LAST_ERR=/tmp/tmp.VoyK9I4VdA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2fONHx03vM namespace/psmdb-operator created + cat /tmp/tmp.VoyK9I4VdA + rm /tmp/tmp.2fONHx03vM /tmp/tmp.VoyK9I4VdA + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.uWNtbfoiaM +++ mktemp ++ local LAST_ERR=/tmp/tmp.KrfawmCrSP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uWNtbfoiaM ++ cat /tmp/tmp.KrfawmCrSP ++ rm /tmp/tmp.uWNtbfoiaM /tmp/tmp.KrfawmCrSP ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1639-7728af52-15-cluster5 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.y02odGyLoC ++ mktemp + local LAST_ERR=/tmp/tmp.AJR0kvZXGq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1639-7728af52-15-cluster5 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y02odGyLoC Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1639-7728af52-15-cluster5" modified. 
+ cat /tmp/tmp.AJR0kvZXGq + rm /tmp/tmp.y02odGyLoC /tmp/tmp.AJR0kvZXGq + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vaYotnHQpa ++ mktemp + local LAST_ERR=/tmp/tmp.J7BRUmMmbz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vaYotnHQpa customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.J7BRUmMmbz + rm /tmp/tmp.vaYotnHQpa /tmp/tmp.J7BRUmMmbz + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2ZzsoBpH8p ++ mktemp + local LAST_ERR=/tmp/tmp.UjRYl8AORX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2ZzsoBpH8p clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.UjRYl8AORX + rm /tmp/tmp.2ZzsoBpH8p /tmp/tmp.UjRYl8AORX + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1639-7728af52") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.p8MPSNayTm ++ mktemp + local LAST_ERR=/tmp/tmp.pxdZLlcJaw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p8MPSNayTm deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.pxdZLlcJaw + rm /tmp/tmp.p8MPSNayTm /tmp/tmp.pxdZLlcJaw + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.pPfLhThU4I +++ mktemp ++ local LAST_ERR=/tmp/tmp.8nxREXTBLo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pPfLhThU4I ++ cat /tmp/tmp.8nxREXTBLo ++ rm /tmp/tmp.pPfLhThU4I /tmp/tmp.8nxREXTBLo ++ return 0 + wait_pod percona-server-mongodb-operator-58f845879d-9gb9g + local pod=percona-server-mongodb-operator-58f845879d-9gb9g + set +o xtrace waiting for pod/percona-server-mongodb-operator-58f845879d-9gb9g to be ready.OK + create_namespace custom-users-roles-21248 + local namespace=custom-users-roles-21248 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces custom-users-roles-21248' + set +o xtrace ++ 
mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces custom-users-roles-21248 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace custom-users-roles-21248 --ignore-not-found + xargs kubectl delete ns ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.G7cy4LMCAK + local LAST_OUT=/tmp/tmp.AuGeoQGVT3 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.ai6BvvKd8I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.1dla8PVnyi + local exit_status=0 + local timeout=4 + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace custom-users-roles-21248 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G7cy4LMCAK + cat /tmp/tmp.ai6BvvKd8I + rm /tmp/tmp.G7cy4LMCAK /tmp/tmp.ai6BvvKd8I + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AuGeoQGVT3 + cat /tmp/tmp.1dla8PVnyi + rm /tmp/tmp.AuGeoQGVT3 /tmp/tmp.1dla8PVnyi + return 0 + kubectl_bin wait --for=delete namespace custom-users-roles-21248 ++ mktemp + local LAST_OUT=/tmp/tmp.lRhDMK7sO0 ++ mktemp + local LAST_ERR=/tmp/tmp.cqfOK740eW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace custom-users-roles-21248 namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lRhDMK7sO0 + cat /tmp/tmp.cqfOK740eW + rm /tmp/tmp.lRhDMK7sO0 /tmp/tmp.cqfOK740eW + return 0 + desc 'create namespace custom-users-roles-21248' + set +o xtrace ----------------------------------------------------------------------------------- create namespace custom-users-roles-21248 ----------------------------------------------------------------------------------- + kubectl_bin create namespace custom-users-roles-21248 ++ mktemp + local LAST_OUT=/tmp/tmp.lbYAqxFJfU ++ mktemp + local LAST_ERR=/tmp/tmp.mAdlTUFVHq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace custom-users-roles-21248 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lbYAqxFJfU namespace/custom-users-roles-21248 created + cat /tmp/tmp.mAdlTUFVHq + rm /tmp/tmp.lbYAqxFJfU /tmp/tmp.mAdlTUFVHq + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.3RKol0x5vk +++ mktemp ++ local LAST_ERR=/tmp/tmp.a6OhEZSNoc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3RKol0x5vk ++ cat /tmp/tmp.a6OhEZSNoc ++ rm /tmp/tmp.3RKol0x5vk /tmp/tmp.a6OhEZSNoc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1639-7728af52-15-cluster5 --namespace=custom-users-roles-21248 ++ mktemp + local LAST_OUT=/tmp/tmp.lwno6YNpcO ++ mktemp + local LAST_ERR=/tmp/tmp.c8yXI0UYrR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1639-7728af52-15-cluster5 --namespace=custom-users-roles-21248 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lwno6YNpcO Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1639-7728af52-15-cluster5" modified. + cat /tmp/tmp.c8yXI0UYrR + rm /tmp/tmp.lwno6YNpcO /tmp/tmp.c8yXI0UYrR + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/app-user-secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.KgKsoAO1g8 ++ mktemp + local LAST_ERR=/tmp/tmp.91s2QrnATf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/app-user-secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KgKsoAO1g8 deployment.apps/psmdb-client created secret/some-users created secret/user-one created secret/user-two created + cat /tmp/tmp.91s2QrnATf + rm /tmp/tmp.KgKsoAO1g8 /tmp/tmp.91s2QrnATf + return 0 + mongoUri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/some-name-rs0.yml + kubectl_bin apply -f - + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/some-name-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + local LAST_OUT=/tmp/tmp.nda6S96vbT + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1639-7728af52"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + local LAST_ERR=/tmp/tmp.2BsWobGlaa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nda6S96vbT perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.2BsWobGlaa + rm /tmp/tmp.nda6S96vbT /tmp/tmp.2BsWobGlaa + return 0 + desc 'Check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- Check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local 
cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CzVnaTsJ4s +++ mktemp ++ local LAST_ERR=/tmp/tmp.Dm4OqZcwec ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CzVnaTsJ4s ++ cat /tmp/tmp.Dm4OqZcwec ++ rm /tmp/tmp.CzVnaTsJ4s /tmp/tmp.Dm4OqZcwec ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PInOS3dNvT +++ mktemp ++ local LAST_ERR=/tmp/tmp.AYFQOAPGxL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PInOS3dNvT ++ cat /tmp/tmp.AYFQOAPGxL ++ rm /tmp/tmp.PInOS3dNvT /tmp/tmp.AYFQOAPGxL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness...... 
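For reference, the apply_cluster step traced above amounts to piping the test CR through a chain of yq image overrides before it reaches kubectl. A rough consolidation of the commands visible in the trace is sketched below; the exact order of the pipe stages is not shown there, and cat_config is the test-framework helper that assembles them, so treat this as an illustration rather than the framework's literal code:

  # sketch: rewrite the image fields of the test CR, then apply it
  cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/conf/some-name-rs0.yml \
    | yq eval '.spec.upgradeOptions.apply="Never"' \
    | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
    | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1639-7728af52"' \
    | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
    | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
    | kubectl apply -f -

The wait_for_running call above then only waits for pods some-name-rs0-0 through some-name-rs0-2 to report ready and checks the arbiter and non-voting flags on the psmdb resource before printing the readiness banner.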
+ desc 'check user created on cluster creation' + set +o xtrace ----------------------------------------------------------------------------------- check user created on cluster creation ----------------------------------------------------------------------------------- + userOne=user-one ++ getSecretData user-one userOnePassKey ++ local secretName=user-one ++ local dataKey=userOnePassKey +++ kubectl get secrets/user-one '--template={{.data.userOnePassKey}}' +++ base64 -d ++ local data=clusterMonitor ++ echo clusterMonitor + userOnePass=clusterMonitor + compare admin 'db.getUser("user-one")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-one + local database=admin + local 'command=db.getUser("user-one")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-one + run_mongo 'use admin\n db.getUser("user-one")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-one")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0e68k1qH57 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WGUrMt8a8n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0e68k1qH57 ++ cat /tmp/tmp.WGUrMt8a8n ++ rm /tmp/tmp.0e68k1qH57 /tmp/tmp.WGUrMt8a8n ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-one")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZVoibFSnTX ++ mktemp + local LAST_ERR=/tmp/tmp.8uB1bSKxrM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-one")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZVoibFSnTX + cat /tmp/tmp.8uB1bSKxrM + rm /tmp/tmp.ZVoibFSnTX /tmp/tmp.8uB1bSKxrM + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-one.json /tmp/tmp.NBvEbPh8ry/user-one + check_mongo_auth user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 + local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ egrep -v 'I NETWORK|W 
NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oeh0hM54Zh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.d8JGfcPEgh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oeh0hM54Zh +++ cat /tmp/tmp.d8JGfcPEgh +++ rm /tmp/tmp.oeh0hM54Zh /tmp/tmp.d8JGfcPEgh +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-6zkzr ++ local mongo_flag=--quiet ++ [[ user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lB7PsfSBxu +++ mktemp ++ local LAST_ERR=/tmp/tmp.qEqpG53bSW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lB7PsfSBxu ++ cat /tmp/tmp.qEqpG53bSW ++ rm /tmp/tmp.lB7PsfSBxu /tmp/tmp.qEqpG53bSW ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'delete initial user from CR and create a new one' + set +o xtrace ----------------------------------------------------------------------------------- delete initial user from CR and create a new one ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.ThMMvOfkhJ ++ mktemp + local LAST_ERR=/tmp/tmp.kPjOn4D8lN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ThMMvOfkhJ perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.kPjOn4D8lN + rm /tmp/tmp.ThMMvOfkhJ /tmp/tmp.kPjOn4D8lN + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + 
let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iEvtG7aGq8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gNoNCAIbHc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iEvtG7aGq8 ++ cat /tmp/tmp.gNoNCAIbHc ++ rm /tmp/tmp.iEvtG7aGq8 /tmp/tmp.gNoNCAIbHc ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sX4UBM57HJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ua4SzjzqDJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sX4UBM57HJ ++ cat /tmp/tmp.Ua4SzjzqDJ ++ rm /tmp/tmp.sX4UBM57HJ /tmp/tmp.Ua4SzjzqDJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-two + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-two + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + sed '/"userId"/d' ++ local LAST_OUT=/tmp/tmp.xZgNzFZEcc +++ mktemp ++ local LAST_ERR=/tmp/tmp.gMCk0TgXHh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xZgNzFZEcc ++ cat /tmp/tmp.gMCk0TgXHh ++ rm /tmp/tmp.xZgNzFZEcc /tmp/tmp.gMCk0TgXHh ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec 
psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vAmjRQ2j5N ++ mktemp + local LAST_ERR=/tmp/tmp.drfsXnyL3s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vAmjRQ2j5N + cat /tmp/tmp.drfsXnyL3s + rm /tmp/tmp.vAmjRQ2j5N /tmp/tmp.drfsXnyL3s + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-two.json /tmp/tmp.NBvEbPh8ry/user-two + userTwo=user-two ++ getSecretData user-two userTwoPassKey ++ local secretName=user-two ++ local dataKey=userTwoPassKey +++ kubectl get secrets/user-two '--template={{.data.userTwoPassKey}}' +++ base64 -d ++ local data=clusterMonitor ++ echo clusterMonitor + userTwoPass=clusterMonitor + check_mongo_auth user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 + local uri=user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ local LAST_OUT=/tmp/tmp.4BWY5wgwRR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1huls9N6OO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4BWY5wgwRR +++ cat /tmp/tmp.1huls9N6OO +++ rm /tmp/tmp.4BWY5wgwRR /tmp/tmp.1huls9N6OO +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-6zkzr ++ local mongo_flag=--quiet ++ [[ user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B4YLOhZ2Xk +++ mktemp ++ local LAST_ERR=/tmp/tmp.lFlqXeCxvb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B4YLOhZ2Xk ++ cat 
/tmp/tmp.lFlqXeCxvb ++ rm /tmp/tmp.B4YLOhZ2Xk /tmp/tmp.lFlqXeCxvb ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 + local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.r0zlOYFYhd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gf7YEtKGam +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.r0zlOYFYhd +++ cat /tmp/tmp.gf7YEtKGam +++ rm /tmp/tmp.r0zlOYFYhd /tmp/tmp.gf7YEtKGam +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-6zkzr ++ local mongo_flag=--quiet ++ [[ user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.52ukUag4Bg +++ mktemp ++ local LAST_ERR=/tmp/tmp.YBIR37vXK0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.52ukUag4Bg ++ cat /tmp/tmp.YBIR37vXK0 ++ rm /tmp/tmp.52ukUag4Bg /tmp/tmp.YBIR37vXK0 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check password change' + set +o xtrace ----------------------------------------------------------------------------------- check password change ----------------------------------------------------------------------------------- + userTwoNewPass=new-user-two-password ++ echo -n new-user-two-password ++ base64 + patch_secret user-two userTwoPassKey bmV3LXVzZXItdHdvLXBhc3N3b3Jk + local secret=user-two + local key=userTwoPassKey + local value=bmV3LXVzZXItdHdvLXBhc3N3b3Jk + kubectl patch secret user-two '-p={"data":{"userTwoPassKey": "bmV3LXVzZXItdHdvLXBhc3N3b3Jk"}}' secret/user-two patched 
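The password rotation above boils down to base64-encoding the new value and merge-patching the secret key that the CR's spec.users[].passwordSecretRef points at; the operator then reconciles the MongoDB credential, which is why the trace sleeps before re-authenticating. A minimal sketch of the same step with the values from this run (variable names are illustrative only):

  # sketch: rotate the password stored in the referenced secret key
  newPass='new-user-two-password'
  encoded=$(echo -n "$newPass" | base64)   # bmV3LXVzZXItdHdvLXBhc3N3b3Jk
  kubectl patch secret user-two -p="{\"data\":{\"userTwoPassKey\": \"$encoded\"}}"
  # the trace below then waits ~20s and re-runs the ping check with the new password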
+ sleep 20 + check_mongo_auth user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 + local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 mongodb '' --quiet ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.f8EkLjCyyh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AswAMBHC6J +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.f8EkLjCyyh +++ cat /tmp/tmp.AswAMBHC6J +++ rm /tmp/tmp.f8EkLjCyyh /tmp/tmp.AswAMBHC6J +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-6zkzr ++ local mongo_flag=--quiet ++ [[ user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1SR9D3rQHV +++ mktemp ++ local LAST_ERR=/tmp/tmp.CnkfaEy6jE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1SR9D3rQHV ++ cat /tmp/tmp.CnkfaEy6jE ++ rm /tmp/tmp.1SR9D3rQHV /tmp/tmp.CnkfaEy6jE ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check user roles update from CR' + set +o xtrace ----------------------------------------------------------------------------------- check user roles update from CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.WexN2eVbLh ++ mktemp + local LAST_ERR=/tmp/tmp.7RQMbgjeyA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": 
[ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WexN2eVbLh perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.7RQMbgjeyA + rm /tmp/tmp.WexN2eVbLh /tmp/tmp.7RQMbgjeyA + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZpJppXX4z4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pZOyei2m69 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZpJppXX4z4 ++ cat /tmp/tmp.pZOyei2m69 ++ rm /tmp/tmp.ZpJppXX4z4 /tmp/tmp.pZOyei2m69 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CiNb2r01eM +++ mktemp ++ local LAST_ERR=/tmp/tmp.aRdY7YbOTo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CiNb2r01eM ++ cat /tmp/tmp.aRdY7YbOTo ++ rm /tmp/tmp.CiNb2r01eM /tmp/tmp.aRdY7YbOTo ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-two-update-roles + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EU0KQu4V90 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3KXJXGUWbF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EU0KQu4V90 ++ cat /tmp/tmp.3KXJXGUWbF ++ rm /tmp/tmp.EU0KQu4V90 /tmp/tmp.3KXJXGUWbF ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Ox99pD7DXN ++ mktemp + local LAST_ERR=/tmp/tmp.zRBzWPNZy7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ox99pD7DXN + cat /tmp/tmp.zRBzWPNZy7 + rm /tmp/tmp.Ox99pD7DXN /tmp/tmp.zRBzWPNZy7 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.NBvEbPh8ry/user-two-update-roles + desc 'check user roles update from DB' + set +o xtrace ----------------------------------------------------------------------------------- check user roles update from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local 'command=use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r7D7LVplZG +++ mktemp ++ local LAST_ERR=/tmp/tmp.gZzKzMUYgP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r7D7LVplZG ++ cat /tmp/tmp.gZzKzMUYgP ++ rm /tmp/tmp.r7D7LVplZG /tmp/tmp.gZzKzMUYgP ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.T2jAE3YQ8R ++ mktemp + local LAST_ERR=/tmp/tmp.tWfDB5oInb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + 
set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T2jAE3YQ8R Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("914d06e1-f904-48c0-bcea-a03813f4ff90") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin bye + cat /tmp/tmp.tWfDB5oInb + rm /tmp/tmp.T2jAE3YQ8R /tmp/tmp.tWfDB5oInb + return 0 + sleep 15 + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-two-update-roles + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-two")' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HANz3OLstW +++ mktemp ++ local LAST_ERR=/tmp/tmp.iHMo51sHYz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HANz3OLstW ++ cat /tmp/tmp.iHMo51sHYz ++ rm /tmp/tmp.HANz3OLstW /tmp/tmp.iHMo51sHYz ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.XY1AOJzjeX ++ mktemp + local LAST_ERR=/tmp/tmp.Kyz9n8SH9x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XY1AOJzjeX + cat /tmp/tmp.Kyz9n8SH9x + rm /tmp/tmp.XY1AOJzjeX /tmp/tmp.Kyz9n8SH9x + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.NBvEbPh8ry/user-two-update-roles + desc 'check user recreated after deleted from DB' + set +o xtrace ----------------------------------------------------------------------------------- 
check user recreated after deleted from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.dropUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local 'command=use admin\n db.dropUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QjbEBKIHAh +++ mktemp ++ local LAST_ERR=/tmp/tmp.h71QMeyxfw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QjbEBKIHAh ++ cat /tmp/tmp.h71QMeyxfw ++ rm /tmp/tmp.QjbEBKIHAh /tmp/tmp.h71QMeyxfw ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.dropUser("user-two")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xOKou0BbSz ++ mktemp + local LAST_ERR=/tmp/tmp.0s0MWJw0jq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.dropUser("user-two")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xOKou0BbSz Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("538724b9-b661-4a01-bcfe-763bdc7e6208") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin true bye + cat /tmp/tmp.0s0MWJw0jq + rm /tmp/tmp.xOKou0BbSz /tmp/tmp.0s0MWJw0jq + return 0 + sleep 15 + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-two-update-roles + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' 
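# "check user recreated after deleted from DB" removes the user straight from the admin
# database; after sleep 15 the same getUser comparison must pass again, i.e. the operator is
# expected to recreate the user from the CR. The drop, run inside the client pod:
printf 'use admin\n db.dropUser("user-two")\n' \
    | mongo 'mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false&replicaSet=rs0'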
+++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' ++ local LAST_OUT=/tmp/tmp.KGsloZxhuj +++ mktemp ++ local LAST_ERR=/tmp/tmp.lMmw46XG4w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KGsloZxhuj ++ cat /tmp/tmp.lMmw46XG4w ++ rm /tmp/tmp.KGsloZxhuj /tmp/tmp.lMmw46XG4w ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AFLoAYRQPx ++ mktemp + local LAST_ERR=/tmp/tmp.EsdR2s6q8Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AFLoAYRQPx + cat /tmp/tmp.EsdR2s6q8Q + rm /tmp/tmp.AFLoAYRQPx /tmp/tmp.EsdR2s6q8Q + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.NBvEbPh8ry/user-two-update-roles + desc 'check new user created after updated user name via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new user created after updated user name via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.wOMNvfWcXO ++ mktemp + local LAST_ERR=/tmp/tmp.oL74zua2po + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wOMNvfWcXO perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.oL74zua2po + rm /tmp/tmp.wOMNvfWcXO /tmp/tmp.oL74zua2po + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aXK4YhICNa +++ mktemp ++ local LAST_ERR=/tmp/tmp.C8mb7pbV0S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aXK4YhICNa ++ cat /tmp/tmp.C8mb7pbV0S ++ rm /tmp/tmp.aXK4YhICNa /tmp/tmp.C8mb7pbV0S ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yC6NawjUhf +++ mktemp ++ local LAST_ERR=/tmp/tmp.rVKLr8hFBk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yC6NawjUhf ++ cat /tmp/tmp.rVKLr8hFBk ++ rm /tmp/tmp.yC6NawjUhf /tmp/tmp.rVKLr8hFBk ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-three-admin-db + local database=admin + local 'command=db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-three-admin-db + run_mongo 'use admin\n db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F3NtCQ4hQF +++ mktemp ++ local LAST_ERR=/tmp/tmp.bMV7eIR82X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F3NtCQ4hQF ++ cat /tmp/tmp.bMV7eIR82X ++ rm /tmp/tmp.F3NtCQ4hQF /tmp/tmp.bMV7eIR82X ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6mrR3nTd59 ++ mktemp + local LAST_ERR=/tmp/tmp.2SCC2QZv87 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6mrR3nTd59 + cat /tmp/tmp.2SCC2QZv87 + rm /tmp/tmp.6mrR3nTd59 /tmp/tmp.2SCC2QZv87 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-three-admin-db.json /tmp/tmp.NBvEbPh8ry/user-three-admin-db + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-two-update-roles + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mJVOPR48Wh +++ mktemp ++ local LAST_ERR=/tmp/tmp.wrbTOryoTB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mJVOPR48Wh ++ cat /tmp/tmp.wrbTOryoTB ++ rm /tmp/tmp.mJVOPR48Wh /tmp/tmp.wrbTOryoTB ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FuXE8qO2ys ++ mktemp + local LAST_ERR=/tmp/tmp.kPr3NzMicy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FuXE8qO2ys + cat /tmp/tmp.kPr3NzMicy + rm /tmp/tmp.FuXE8qO2ys /tmp/tmp.kPr3NzMicy + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.NBvEbPh8ry/user-two-update-roles + check_mongo_auth user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 + local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local 
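# "check new user created after updated user name via CR" renames the users entry from
# user-two to user-three while keeping the same password secret. The patch applied above,
# reformatted for readability:
kubectl patch psmdb some-name --type=merge --patch '{
  "spec": {
    "users": [
      {
        "name": "user-three",
        "db": "admin",
        "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" },
        "roles": [ { "db": "admin", "name": "clusterAdmin" } ]
      }
    ]
  }
}'
# Both db.getUser("user-three") and db.getUser("user-two") are then diffed against their golden
# files and both users authenticate with a ping, so the rename adds user-three without removing
# the user-two created earlier.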
uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6hUVUL8ZGz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.V1E6v7rlrz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6hUVUL8ZGz +++ cat /tmp/tmp.V1E6v7rlrz +++ rm /tmp/tmp.6hUVUL8ZGz /tmp/tmp.V1E6v7rlrz +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-6zkzr ++ local mongo_flag=--quiet ++ [[ user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NSp9azO7Mp +++ mktemp ++ local LAST_ERR=/tmp/tmp.EEERsjgaT9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NSp9azO7Mp ++ cat /tmp/tmp.EEERsjgaT9 ++ rm /tmp/tmp.NSp9azO7Mp /tmp/tmp.EEERsjgaT9 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 + local uri=user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 mongodb '' --quiet ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IR5huQC4bF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tv6tOGDHvM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IR5huQC4bF +++ cat /tmp/tmp.tv6tOGDHvM +++ rm 
/tmp/tmp.IR5huQC4bF /tmp/tmp.tv6tOGDHvM +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-6zkzr ++ local mongo_flag=--quiet ++ [[ user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZBiEUCj5XX +++ mktemp ++ local LAST_ERR=/tmp/tmp.1rMwOqIcYA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZBiEUCj5XX ++ cat /tmp/tmp.1rMwOqIcYA ++ rm /tmp/tmp.ZBiEUCj5XX /tmp/tmp.1rMwOqIcYA ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check new user created after updated user db via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new user created after updated user db via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"newDb", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.U84zCRCREr ++ mktemp + local LAST_ERR=/tmp/tmp.G3VKKEtPH8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"newDb", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U84zCRCREr perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.G3VKKEtPH8 + rm /tmp/tmp.U84zCRCREr /tmp/tmp.G3VKKEtPH8 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TEgqBtYbal +++ mktemp ++ local LAST_ERR=/tmp/tmp.TmcST4sq7O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
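# "check new user created after updated user db via CR" changes only the "db" field of the same
# entry from admin to newDb; the follow-up compares look the user up in newDb
# (user-three-newDb-db.json) and in admin (user-three-admin-db.json). The patch, reformatted:
kubectl patch psmdb some-name --type=merge --patch '{
  "spec": {
    "users": [
      {
        "name": "user-three",
        "db": "newDb",
        "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" },
        "roles": [ { "db": "admin", "name": "clusterAdmin" } ]
      }
    ]
  }
}'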
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TEgqBtYbal ++ cat /tmp/tmp.TmcST4sq7O ++ rm /tmp/tmp.TEgqBtYbal /tmp/tmp.TmcST4sq7O ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2XTRP0ZmXJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.klRYj79x0c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2XTRP0ZmXJ ++ cat /tmp/tmp.klRYj79x0c ++ rm /tmp/tmp.2XTRP0ZmXJ /tmp/tmp.klRYj79x0c ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare newDb 'db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-three-newDb-db + local database=newDb + local 'command=db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-three-newDb-db + run_mongo 'use newDb\n db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use newDb\n db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5DZPOi0ehF +++ mktemp ++ local LAST_ERR=/tmp/tmp.GSIX0g1OiG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5DZPOi0ehF ++ cat /tmp/tmp.GSIX0g1OiG ++ rm /tmp/tmp.5DZPOi0ehF /tmp/tmp.GSIX0g1OiG ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use newDb\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.oEJPRMu8Ue ++ mktemp + local LAST_ERR=/tmp/tmp.ol8P9FDEJC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use newDb\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oEJPRMu8Ue + cat /tmp/tmp.ol8P9FDEJC + rm /tmp/tmp.oEJPRMu8Ue /tmp/tmp.ol8P9FDEJC + return 0 + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-three-newDb-db.json /tmp/tmp.NBvEbPh8ry/user-three-newDb-db + compare admin 'db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-three-admin-db + local database=admin + local 'command=db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-three-admin-db + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' + run_mongo 'use admin\n db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2yPqRKymI7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZAzxG9jajk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2yPqRKymI7 ++ cat /tmp/tmp.ZAzxG9jajk ++ rm /tmp/tmp.2yPqRKymI7 /tmp/tmp.ZAzxG9jajk ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8MSTN4kZRT ++ mktemp + local LAST_ERR=/tmp/tmp.HU5lm2e9Cf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8MSTN4kZRT + cat /tmp/tmp.HU5lm2e9Cf + rm /tmp/tmp.8MSTN4kZRT /tmp/tmp.HU5lm2e9Cf + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-three-admin-db.json /tmp/tmp.NBvEbPh8ry/user-three-admin-db + desc 'check new user created with default db and secret password key' + set +o xtrace ----------------------------------------------------------------------------------- check new user created with default db and secret password key ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-four", "passwordSecretRef": { "name": "user-two" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.8jzD4j87zU ++ mktemp + local LAST_ERR=/tmp/tmp.1Acm8EJuwf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ 
"spec": {"users":[ { "name":"user-four", "passwordSecretRef": { "name": "user-two" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8jzD4j87zU perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.1Acm8EJuwf + rm /tmp/tmp.8jzD4j87zU /tmp/tmp.1Acm8EJuwf + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KBtwhcgwPL +++ mktemp ++ local LAST_ERR=/tmp/tmp.9lInxR4Aew ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KBtwhcgwPL ++ cat /tmp/tmp.9lInxR4Aew ++ rm /tmp/tmp.KBtwhcgwPL /tmp/tmp.9lInxR4Aew ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WIoHVMXeFK +++ mktemp ++ local LAST_ERR=/tmp/tmp.V9ESGWz4lu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WIoHVMXeFK ++ cat /tmp/tmp.V9ESGWz4lu ++ rm /tmp/tmp.WIoHVMXeFK /tmp/tmp.V9ESGWz4lu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-four")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-four + local database=admin + local 'command=db.getUser("user-four")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-four + run_mongo 'use admin\n db.getUser("user-four")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getUser("user-four")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8UihqolZqb +++ mktemp ++ local LAST_ERR=/tmp/tmp.WAo3hs0SRt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8UihqolZqb ++ cat /tmp/tmp.WAo3hs0SRt ++ rm /tmp/tmp.8UihqolZqb /tmp/tmp.WAo3hs0SRt ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-four")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.I5IA6Xi1Pk ++ mktemp + local LAST_ERR=/tmp/tmp.DJVmyXG7K4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getUser("user-four")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I5IA6Xi1Pk + cat /tmp/tmp.DJVmyXG7K4 + rm /tmp/tmp.I5IA6Xi1Pk /tmp/tmp.DJVmyXG7K4 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-four.json /tmp/tmp.NBvEbPh8ry/user-four + desc 'check user role on cluster initialization' + set +o xtrace ----------------------------------------------------------------------------------- check user role on cluster initialization ----------------------------------------------------------------------------------- + compare admin 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-one + local database=admin + local 'command=db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-one + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zQTQm1gr4B +++ mktemp ++ local LAST_ERR=/tmp/tmp.Uc08CqdoEB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zQTQm1gr4B ++ cat /tmp/tmp.Uc08CqdoEB ++ rm /tmp/tmp.zQTQm1gr4B /tmp/tmp.Uc08CqdoEB ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec 
psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TP0ixKlZT4 ++ mktemp + local LAST_ERR=/tmp/tmp.FSc6nJ33yL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TP0ixKlZT4 + cat /tmp/tmp.FSc6nJ33yL + rm /tmp/tmp.TP0ixKlZT4 /tmp/tmp.FSc6nJ33yL + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-one.json /tmp/tmp.NBvEbPh8ry/role-one + desc 'check role recreated after deleted from DB' + set +o xtrace ----------------------------------------------------------------------------------- check role recreated after deleted from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.dropRole("role-one")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local 'command=use admin\n db.dropRole("role-one")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BTS89OWo2H +++ mktemp ++ local LAST_ERR=/tmp/tmp.OCvNcxz39W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BTS89OWo2H ++ cat /tmp/tmp.OCvNcxz39W ++ rm /tmp/tmp.BTS89OWo2H /tmp/tmp.OCvNcxz39W ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.dropRole("role-one")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jsuO5XPw8K ++ mktemp + local LAST_ERR=/tmp/tmp.4wrdoeMVXK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.dropRole("role-one")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jsuO5XPw8K Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit 
session: session { "id" : UUID("9883ca25-fcee-46f9-8c35-bb8e37ea8012") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin true bye + cat /tmp/tmp.4wrdoeMVXK + rm /tmp/tmp.jsuO5XPw8K /tmp/tmp.4wrdoeMVXK + return 0 + sleep 15 + compare admin 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-one + local database=admin + local 'command=db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-one + run_mongo 'use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nx6bdnRPIP +++ mktemp ++ local LAST_ERR=/tmp/tmp.H5SAyw16JD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nx6bdnRPIP ++ cat /tmp/tmp.H5SAyw16JD ++ rm /tmp/tmp.nx6bdnRPIP /tmp/tmp.H5SAyw16JD ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jYPtyulUnH ++ mktemp + local LAST_ERR=/tmp/tmp.ASMsfXmytA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jYPtyulUnH + cat /tmp/tmp.ASMsfXmytA + rm /tmp/tmp.jYPtyulUnH /tmp/tmp.ASMsfXmytA + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-one.json /tmp/tmp.NBvEbPh8ry/role-one + desc 'delete initial role from CR and create a new one' + set +o xtrace ----------------------------------------------------------------------------------- delete initial role from CR and create a new one ----------------------------------------------------------------------------------- + 
kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ], "roles": [ { "role": "read", "db": "admin" } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.FSJSt2oTci ++ mktemp + local LAST_ERR=/tmp/tmp.6EDCUfPASA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ], "roles": [ { "role": "read", "db": "admin" } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FSJSt2oTci perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.6EDCUfPASA + rm /tmp/tmp.FSJSt2oTci /tmp/tmp.6EDCUfPASA + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.THyP0iMALD +++ mktemp ++ local LAST_ERR=/tmp/tmp.FLCkHkvvR4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.THyP0iMALD ++ cat /tmp/tmp.FLCkHkvvR4 ++ rm /tmp/tmp.THyP0iMALD /tmp/tmp.FLCkHkvvR4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l3MLcH6LXI +++ mktemp ++ local LAST_ERR=/tmp/tmp.M0kigUDLHu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l3MLcH6LXI ++ cat /tmp/tmp.M0kigUDLHu ++ rm /tmp/tmp.l3MLcH6LXI /tmp/tmp.M0kigUDLHu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-one + local database=admin + local 'command=db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-one + run_mongo 'use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getRole("role-one", {showPrivileges: 
true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Phz2MthIGv +++ mktemp ++ local LAST_ERR=/tmp/tmp.fvEcdtvUym ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Phz2MthIGv ++ cat /tmp/tmp.fvEcdtvUym ++ rm /tmp/tmp.Phz2MthIGv /tmp/tmp.fvEcdtvUym ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BYq9golQcL ++ mktemp + local LAST_ERR=/tmp/tmp.V7ZqvuKQvB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BYq9golQcL + cat /tmp/tmp.V7ZqvuKQvB + rm /tmp/tmp.BYq9golQcL /tmp/tmp.V7ZqvuKQvB + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-one.json /tmp/tmp.NBvEbPh8ry/role-one + compare admin 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-two + local database=admin + local 'command=db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-two + run_mongo 'use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ 
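# "delete initial role from CR and create a new one" replaces the CR's roles list with a single
# role-two entry; the test then re-checks role-one (which still matches its golden file) and
# verifies the new role-two against role-two.json, so removing a role from the CR does not drop
# it from the database in this run. The patch, reformatted:
kubectl patch psmdb some-name --type=merge --patch '{
  "spec": {
    "roles": [
      {
        "role": "role-two",
        "db": "admin",
        "privileges": [
          { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] }
        ],
        "roles": [ { "role": "read", "db": "admin" } ]
      }
    ]
  }
}'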
local LAST_OUT=/tmp/tmp.ie17Ri9NXd +++ mktemp ++ local LAST_ERR=/tmp/tmp.kJAbGU6etM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ie17Ri9NXd ++ cat /tmp/tmp.kJAbGU6etM ++ rm /tmp/tmp.ie17Ri9NXd /tmp/tmp.kJAbGU6etM ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.oxQiledG1V ++ mktemp + local LAST_ERR=/tmp/tmp.N5kFf5qCHE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oxQiledG1V + cat /tmp/tmp.N5kFf5qCHE + rm /tmp/tmp.oxQiledG1V /tmp/tmp.N5kFf5qCHE + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-two.json /tmp/tmp.NBvEbPh8ry/role-two + desc 'check role update from CR' + set +o xtrace ----------------------------------------------------------------------------------- check role update from CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.ETPF1En124 ++ mktemp + local LAST_ERR=/tmp/tmp.UGkEYfkcjv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ETPF1En124 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.UGkEYfkcjv + rm /tmp/tmp.ETPF1En124 /tmp/tmp.UGkEYfkcjv + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g32j4mReU6 +++ mktemp 
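# "check role update from CR" patches role-two again, this time without the nested "roles"
# grants, leaving only the find privilege on the config database; the result is diffed against
# role-two-updated.json. The patch, reformatted:
kubectl patch psmdb some-name --type=merge --patch '{
  "spec": {
    "roles": [
      {
        "role": "role-two",
        "db": "admin",
        "privileges": [
          { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] }
        ]
      }
    ]
  }
}'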
++ local LAST_ERR=/tmp/tmp.QnRySWvnAu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g32j4mReU6 ++ cat /tmp/tmp.QnRySWvnAu ++ rm /tmp/tmp.g32j4mReU6 /tmp/tmp.QnRySWvnAu ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MgY4STd0H9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6bZkQtYnog ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MgY4STd0H9 ++ cat /tmp/tmp.6bZkQtYnog ++ rm /tmp/tmp.MgY4STd0H9 /tmp/tmp.6bZkQtYnog ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-two-updated + local database=admin + local 'command=db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-two-updated + sed '/"userId"/d' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hp6WAfhPY2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SMgjVp9RSZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hp6WAfhPY2 ++ cat /tmp/tmp.SMgjVp9RSZ ++ rm /tmp/tmp.hp6WAfhPY2 /tmp/tmp.SMgjVp9RSZ ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.HR5jpd3SmD ++ mktemp + local LAST_ERR=/tmp/tmp.uoGkVUKo0x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 
2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HR5jpd3SmD + cat /tmp/tmp.uoGkVUKo0x + rm /tmp/tmp.HR5jpd3SmD /tmp/tmp.uoGkVUKo0x + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-two-updated.json /tmp/tmp.NBvEbPh8ry/role-two-updated + desc 'check role update from DB' + set +o xtrace ----------------------------------------------------------------------------------- check role update from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local 'command=use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5VribhVPSr +++ mktemp ++ local LAST_ERR=/tmp/tmp.YNce1eWuk0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5VribhVPSr ++ cat /tmp/tmp.YNce1eWuk0 ++ rm /tmp/tmp.5VribhVPSr /tmp/tmp.YNce1eWuk0 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gdmFOcPjNm ++ mktemp + local LAST_ERR=/tmp/tmp.tdfNNOWO9i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gdmFOcPjNm Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-21248.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("dbd3e328-ba70-46a8-97bf-20400041432a") } Percona Server for MongoDB server version: 
v7.0.14-8 WARNING: shell and server versions do not match switched to db admin bye + cat /tmp/tmp.tdfNNOWO9i + rm /tmp/tmp.gdmFOcPjNm /tmp/tmp.tdfNNOWO9i + return 0 + sleep 15 + compare admin 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-two-updated + local database=admin + local 'command=db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-two-updated + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0UPTFCcUBj +++ mktemp ++ local LAST_ERR=/tmp/tmp.0h6y7z7q5Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0UPTFCcUBj ++ cat /tmp/tmp.0h6y7z7q5Z ++ rm /tmp/tmp.0UPTFCcUBj /tmp/tmp.0h6y7z7q5Z ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.2L2CcON33U ++ mktemp + local LAST_ERR=/tmp/tmp.rdkhIbuyaX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2L2CcON33U + cat /tmp/tmp.rdkhIbuyaX + rm /tmp/tmp.2L2CcON33U /tmp/tmp.rdkhIbuyaX + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-two-updated.json /tmp/tmp.NBvEbPh8ry/role-two-updated + desc 'check new role created after updated role name via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new role created after updated role name via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { 
"role": "role-three", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.x8ecnObZUo ++ mktemp + local LAST_ERR=/tmp/tmp.zrWSv310Jb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-three", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x8ecnObZUo perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.zrWSv310Jb + rm /tmp/tmp.x8ecnObZUo /tmp/tmp.zrWSv310Jb + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3NXPVLWxrl +++ mktemp ++ local LAST_ERR=/tmp/tmp.pTKEZ4UClR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3NXPVLWxrl ++ cat /tmp/tmp.pTKEZ4UClR ++ rm /tmp/tmp.3NXPVLWxrl /tmp/tmp.pTKEZ4UClR ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OaRzKi2s0z +++ mktemp ++ local LAST_ERR=/tmp/tmp.sbwRJphlBM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OaRzKi2s0z ++ cat /tmp/tmp.sbwRJphlBM ++ rm /tmp/tmp.OaRzKi2s0z /tmp/tmp.sbwRJphlBM ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-three + local database=admin + local 'command=db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-three + sed '/"userId"/d' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' 
userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WGKDZgfDJz +++ mktemp ++ local LAST_ERR=/tmp/tmp.sCn3KhZ0xP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WGKDZgfDJz ++ cat /tmp/tmp.sCn3KhZ0xP ++ rm /tmp/tmp.WGKDZgfDJz /tmp/tmp.sCn3KhZ0xP ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.uJ000tgelH ++ mktemp + local LAST_ERR=/tmp/tmp.qVga0oGDdx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uJ000tgelH + cat /tmp/tmp.qVga0oGDdx + rm /tmp/tmp.uJ000tgelH /tmp/tmp.qVga0oGDdx + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-three.json /tmp/tmp.NBvEbPh8ry/role-three + desc 'check creating multiple roles and the users in a single CR apply' + set +o xtrace ----------------------------------------------------------------------------------- check creating multiple roles and the users in a single CR apply ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": { "roles": [ { "role": "role-four", "db": "testAdmin1", "privileges": [ { "resource": { "db": "testAdmin1", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin1", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin1", "collection": "system.version" }, "actions": [ "find" ] } ] }, { "role": "role-five", "db": "testAdmin2", "privileges": [ { "resource": { "db": "testAdmin2", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin2", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin2", "collection": "system.version" }, "actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", 
"db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.6OdQNW3hWg ++ mktemp + local LAST_ERR=/tmp/tmp.aJhozdn2tY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": { "roles": [ { "role": "role-four", "db": "testAdmin1", "privileges": [ { "resource": { "db": "testAdmin1", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin1", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin1", "collection": "system.version" }, "actions": [ "find" ] } ] }, { "role": "role-five", "db": "testAdmin2", "privileges": [ { "resource": { "db": "testAdmin2", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin2", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin2", "collection": "system.version" }, "actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", "db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6OdQNW3hWg perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.aJhozdn2tY + rm /tmp/tmp.6OdQNW3hWg /tmp/tmp.aJhozdn2tY + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Lg7WEu1byh +++ mktemp ++ local LAST_ERR=/tmp/tmp.PaYC3bd7n6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Lg7WEu1byh ++ cat /tmp/tmp.PaYC3bd7n6 ++ rm /tmp/tmp.Lg7WEu1byh /tmp/tmp.PaYC3bd7n6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NVCHVCZ5l7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QBhh61UsgO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NVCHVCZ5l7 ++ cat /tmp/tmp.QBhh61UsgO ++ rm /tmp/tmp.NVCHVCZ5l7 /tmp/tmp.QBhh61UsgO ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare testAdmin1 'db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-four + local database=testAdmin1 + local 'command=db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-four + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' + run_mongo 'use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MnnFBQdeUB +++ mktemp ++ local LAST_ERR=/tmp/tmp.VNgNBz5CGW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MnnFBQdeUB ++ cat /tmp/tmp.VNgNBz5CGW ++ rm /tmp/tmp.MnnFBQdeUB /tmp/tmp.VNgNBz5CGW ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.7YsQ960SJX ++ mktemp + local LAST_ERR=/tmp/tmp.iaZl86kLvh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7YsQ960SJX + cat /tmp/tmp.iaZl86kLvh + rm /tmp/tmp.7YsQ960SJX /tmp/tmp.iaZl86kLvh + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-four.json /tmp/tmp.NBvEbPh8ry/role-four + compare testAdmin2 'db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 role-five + local database=testAdmin2 + 
local 'command=db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=role-five + run_mongo 'use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uju2KfcD3z +++ mktemp ++ local LAST_ERR=/tmp/tmp.6LKzy6KIjh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uju2KfcD3z ++ cat /tmp/tmp.6LKzy6KIjh ++ rm /tmp/tmp.Uju2KfcD3z /tmp/tmp.6LKzy6KIjh ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jqlMlfxtE1 ++ mktemp + local LAST_ERR=/tmp/tmp.hzjbwfb6Zw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jqlMlfxtE1 + cat /tmp/tmp.hzjbwfb6Zw + rm /tmp/tmp.jqlMlfxtE1 /tmp/tmp.hzjbwfb6Zw + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/role-five.json /tmp/tmp.NBvEbPh8ry/role-five + compare testAdmin 'db.getUser("user-five")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-five + local database=testAdmin + local 'command=db.getUser("user-five")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-five + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use testAdmin\n db.getUser("user-five")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use testAdmin\n db.getUser("user-five")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp + sed '/"userId"/d' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.cgHK9Am3gv +++ mktemp ++ local LAST_ERR=/tmp/tmp.dlJjhF1VT9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cgHK9Am3gv ++ cat /tmp/tmp.dlJjhF1VT9 ++ rm /tmp/tmp.cgHK9Am3gv /tmp/tmp.dlJjhF1VT9 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin\n db.getUser("user-five")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.iGFHruu9eE ++ mktemp + local LAST_ERR=/tmp/tmp.mc8hiJutIy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin\n db.getUser("user-five")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iGFHruu9eE + cat /tmp/tmp.mc8hiJutIy + rm /tmp/tmp.iGFHruu9eE /tmp/tmp.mc8hiJutIy + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-five.json /tmp/tmp.NBvEbPh8ry/user-five + compare testAdmin 'db.getUser("user-six")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 user-six + local database=testAdmin + local 'command=db.getUser("user-six")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local target=user-six + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use testAdmin\n db.getUser("user-six")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 mongodb + local 'command=use testAdmin\n db.getUser("user-six")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kLzvsELgqs +++ mktemp ++ local LAST_ERR=/tmp/tmp.BMkD3gr5GQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kLzvsELgqs ++ cat /tmp/tmp.BMkD3gr5GQ ++ rm /tmp/tmp.kLzvsELgqs /tmp/tmp.BMkD3gr5GQ ++ return 0 + local client_container=psmdb-client-6c585f8dbd-6zkzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec 
psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin\n db.getUser("user-six")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.96vYFx9V86 ++ mktemp + local LAST_ERR=/tmp/tmp.npzhbU3ZBa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-6zkzr -- bash -c 'printf '\''use testAdmin\n db.getUser("user-six")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-21248.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.96vYFx9V86 + cat /tmp/tmp.npzhbU3ZBa + rm /tmp/tmp.96vYFx9V86 /tmp/tmp.npzhbU3ZBa + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/e2e-tests/custom-users-roles/compare/user-six.json /tmp/tmp.NBvEbPh8ry/user-six + destroy custom-users-roles-21248 + local namespace=custom-users-roles-21248 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.kuezNtco8J ++ mktemp + local LAST_ERR=/tmp/tmp.IrjJMvfeUt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kuezNtco8J customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.IrjJMvfeUt + rm /tmp/tmp.kuezNtco8J /tmp/tmp.IrjJMvfeUt + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eSvVGtma7i ++ mktemp + local LAST_ERR=/tmp/tmp.UZ82kL2E9d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + 
kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eSvVGtma7i + cat /tmp/tmp.UZ82kL2E9d + rm /tmp/tmp.eSvVGtma7i /tmp/tmp.UZ82kL2E9d + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xbPXkItWJy ++ mktemp + local LAST_ERR=/tmp/tmp.x9z3h0DOcB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xbPXkItWJy + cat /tmp/tmp.x9z3h0DOcB + rm /tmp/tmp.xbPXkItWJy /tmp/tmp.x9z3h0DOcB + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.6GT5aRxesj ++ mktemp + local LAST_ERR=/tmp/tmp.Cqgl3DxbVd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6GT5aRxesj + cat /tmp/tmp.Cqgl3DxbVd + rm /tmp/tmp.6GT5aRxesj /tmp/tmp.Cqgl3DxbVd + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.v7lizu1oeA ++ mktemp + local LAST_ERR=/tmp/tmp.3nPApK6tQm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1639/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v7lizu1oeA clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.3nPApK6tQm + rm /tmp/tmp.v7lizu1oeA /tmp/tmp.3nPApK6tQm + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.LSgALGRdDq ++ mktemp + local LAST_ERR=/tmp/tmp.rYwgpU3EOr + 
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.LSgALGRdDq + cat /tmp/tmp.rYwgpU3EOr Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.LSgALGRdDq + cat /tmp/tmp.rYwgpU3EOr + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.LSgALGRdDq + cat /tmp/tmp.rYwgpU3EOr + sleep 8 + cat /tmp/tmp.LSgALGRdDq + cat /tmp/tmp.rYwgpU3EOr Error from server (NotFound): error when deleting
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.LSgALGRdDq /tmp/tmp.rYwgpU3EOr + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.NBvEbPh8ry + kubectl_bin delete --grace-period=0 --force=true namespace custom-users-roles-21248 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + local LAST_OUT=/tmp/tmp.1xuGzs6tAY ++ mktemp + local LAST_OUT=/tmp/tmp.JeFA0jTGJp ++ 
mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.LCC4YWlImd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace custom-users-roles-21248 + local LAST_ERR=/tmp/tmp.Lrmebkk89M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
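The retry and temp-file handling seen throughout this trace comes from the kubectl_bin wrapper used by the e2e suite. Below is a minimal sketch of what that wrapper appears to do, reconstructed from this log alone; the real helper lives in e2e-tests/functions, and the retry count, the 4s/8s backoff arithmetic, and the exact success test here are assumptions, not copied source.

    # Hypothetical reconstruction of the kubectl_bin retry wrapper (inferred from the trace)
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" = 0 ]; then
                break
            fi
            # on failure, show what happened and back off before the next attempt
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            sleep $((timeout * (i + 1)))   # assumed backoff: 4s, then 8s, matching the trace
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

Note that the final "+ return 1" above is immediately followed by "+ true": the call site tolerates the failed cert-manager deletion (something like kubectl_bin delete -f "$manifest" || true), which is why the run still reaches "test passed".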
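The NotFound spam itself is expected on a cluster where cert-manager was never installed or was already removed: a plain kubectl delete -f fails for every absent object, so the wrapper retries and re-prints the same error list. Passing --ignore-not-found makes such a teardown idempotent; this is an illustrative alternative, not what the test currently runs:

    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml \
        --ignore-not-found --wait=false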
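The last step force-deletes both the test namespace and the operator namespace; the interleaved trace output suggests the two kubectl_bin calls run concurrently rather than one after the other. A sketch of such a call site, with the backgrounding and wait assumed from the interleaving:

    # Assumed structure of the final cleanup (the & / wait is an inference, not confirmed source)
    kubectl_bin delete --grace-period=0 --force=true namespace custom-users-roles-21248 &
    kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator &
    wait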