Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/logs/custom-users-roles.log WARNING: version difference between client (1.31) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.31) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.31) and server (1.27) exceeds the supported minor version skew of +/-1 + psmdb=some-name + cluster=some-name-rs0 + create_infra custom-users-roles-6901 + local ns=custom-users-roles-6901 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.ORt8vhRSKV ++ mktemp + local LAST_ERR=/tmp/tmp.ub9EKbmj7A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ORt8vhRSKV customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.ub9EKbmj7A + rm /tmp/tmp.ORt8vhRSKV /tmp/tmp.ub9EKbmj7A + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.9SEwsWN8V2 ++ mktemp + local LAST_ERR=/tmp/tmp.Ruelkjxuy8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9SEwsWN8V2 + cat /tmp/tmp.Ruelkjxuy8 + rm /tmp/tmp.9SEwsWN8V2 /tmp/tmp.Ruelkjxuy8 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server 
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.hSWTUEii2R ++ mktemp + local LAST_ERR=/tmp/tmp.FHBMYWYh28 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hSWTUEii2R + cat /tmp/tmp.FHBMYWYh28 + rm /tmp/tmp.hSWTUEii2R /tmp/tmp.FHBMYWYh28 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dtrLMjl2Rt ++ mktemp + local LAST_ERR=/tmp/tmp.BXzXeJRQ3v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dtrLMjl2Rt + cat /tmp/tmp.BXzXeJRQ3v + rm /tmp/tmp.dtrLMjl2Rt /tmp/tmp.BXzXeJRQ3v + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.WoyYX8XRAu ++ mktemp + local LAST_ERR=/tmp/tmp.egASXr6kbr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WoyYX8XRAu clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.egASXr6kbr + rm /tmp/tmp.WoyYX8XRAu /tmp/tmp.egASXr6kbr + return 0 + check_crd_for_deletion PR-1608-f10c3c44 + local git_tag=PR-1608-f10c3c44 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1608-f10c3c44/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tphdfU7gxF +++ mktemp ++ local LAST_ERR=/tmp/tmp.KmAHK4pe6t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.tphdfU7gxF ++ cat /tmp/tmp.KmAHK4pe6t Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ 
for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.tphdfU7gxF ++ cat /tmp/tmp.KmAHK4pe6t Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.tphdfU7gxF ++ cat /tmp/tmp.KmAHK4pe6t Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.tphdfU7gxF ++ cat /tmp/tmp.KmAHK4pe6t Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.tphdfU7gxF /tmp/tmp.KmAHK4pe6t ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.VgUoPhUmKc + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ScuzkqpwQ8 ++ mktemp + local LAST_ERR=/tmp/tmp.C8wljB183k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + awk '{print$1}' + local 
LAST_ERR=/tmp/tmp.tULawNmWXx + local exit_status=0 + local timeout=4 + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ScuzkqpwQ8 + cat /tmp/tmp.C8wljB183k + rm /tmp/tmp.ScuzkqpwQ8 /tmp/tmp.C8wljB183k + return 0 namespace "custom-users-roles-6376" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VgUoPhUmKc namespace "psmdb-operator" deleted + cat /tmp/tmp.tULawNmWXx + rm /tmp/tmp.VgUoPhUmKc /tmp/tmp.tULawNmWXx + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sFLiaAhpVQ ++ mktemp + local LAST_ERR=/tmp/tmp.mkZnllcUDK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sFLiaAhpVQ + cat /tmp/tmp.mkZnllcUDK + rm /tmp/tmp.sFLiaAhpVQ /tmp/tmp.mkZnllcUDK + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FwPfftmVD8 ++ mktemp + local LAST_ERR=/tmp/tmp.QDirhQ0zOy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FwPfftmVD8 namespace/psmdb-operator created + cat /tmp/tmp.QDirhQ0zOy + rm /tmp/tmp.FwPfftmVD8 /tmp/tmp.QDirhQ0zOy + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.GmmPIC3cah +++ mktemp ++ local LAST_ERR=/tmp/tmp.NrsNuSnHdH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GmmPIC3cah ++ cat /tmp/tmp.NrsNuSnHdH ++ rm /tmp/tmp.GmmPIC3cah /tmp/tmp.NrsNuSnHdH ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1608-f10c3c44-3-cluster5 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.e1WFZAx2Ik ++ mktemp + local LAST_ERR=/tmp/tmp.igVRyM7gi9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1608-f10c3c44-3-cluster5 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e1WFZAx2Ik Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1608-f10c3c44-3-cluster5" modified. 
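For reference, the mktemp / LAST_OUT / LAST_ERR / "seq 0 2" pattern that repeats throughout this trace comes from a kubectl retry wrapper in the test suite. A minimal sketch of what it appears to do, reconstructed only from the xtrace output above (the suite's real kubectl_bin helper may differ in detail):

kubectl_bin() {
    # capture stdout/stderr of each attempt in temp files, as seen in the trace
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp); LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -ne 0 ]; then
            cat "$LAST_OUT"; cat "$LAST_ERR"
            sleep $((timeout * i))     # observed back-off between attempts: 0s, 4s, 8s
        else
            break
        fi
    done
    cat "$LAST_OUT"; cat "$LAST_ERR"
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}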
+ cat /tmp/tmp.igVRyM7gi9 + rm /tmp/tmp.e1WFZAx2Ik /tmp/tmp.igVRyM7gi9 + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.r1NLPAN4eS ++ mktemp + local LAST_ERR=/tmp/tmp.0fKcEFs03O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r1NLPAN4eS customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.0fKcEFs03O + rm /tmp/tmp.r1NLPAN4eS /tmp/tmp.0fKcEFs03O + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Blkm6seHv5 ++ mktemp + local LAST_ERR=/tmp/tmp.6IbX1UucPu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Blkm6seHv5 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.6IbX1UucPu + rm /tmp/tmp.Blkm6seHv5 /tmp/tmp.6IbX1UucPu + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1608-f10c3c44") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.uDLEgjHxA5 ++ mktemp + local LAST_ERR=/tmp/tmp.0bCmAlEurG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uDLEgjHxA5 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.0bCmAlEurG + rm /tmp/tmp.uDLEgjHxA5 /tmp/tmp.0bCmAlEurG + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.PfzsheQfvI +++ mktemp ++ local LAST_ERR=/tmp/tmp.wSVyupkKuC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PfzsheQfvI ++ cat /tmp/tmp.wSVyupkKuC ++ rm /tmp/tmp.PfzsheQfvI /tmp/tmp.wSVyupkKuC ++ return 0 + wait_pod percona-server-mongodb-operator-57b744448-v6ccl + local pod=percona-server-mongodb-operator-57b744448-v6ccl + set +o xtrace waiting for pod/percona-server-mongodb-operator-57b744448-v6ccl to be ready.OK + create_namespace custom-users-roles-6901 + local namespace=custom-users-roles-6901 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces custom-users-roles-6901' + set +o xtrace 
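Stripped of the retry wrapper, the operator bring-up traced above boils down to three plain commands. The paths are relative to the checked-out repo and the image tag is the one yq substitutes in this run; the same yq call also sets DISABLE_TELEMETRY=true and LOG_LEVEL=DEBUG, omitted here for brevity (treat this as a readable summary of the trace, not the helper's actual source):

kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml \
    | kubectl apply -n psmdb-operator -f -
yq eval '.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1608-f10c3c44"' deploy/cw-operator.yaml \
    | kubectl apply -n psmdb-operator -f -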
----------------------------------------------------------------------------------- cleaned up old namespaces custom-users-roles-6901 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace custom-users-roles-6901 --ignore-not-found + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.gZPiHf1C54 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Dot2nATg4a + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.b8fP9gUWoR ++ seq 0 2 + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.PGeZK8TDhP + local exit_status=0 + local timeout=4 + xargs kubectl delete ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace custom-users-roles-6901 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gZPiHf1C54 + cat /tmp/tmp.Dot2nATg4a + rm /tmp/tmp.gZPiHf1C54 /tmp/tmp.Dot2nATg4a + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b8fP9gUWoR + cat /tmp/tmp.PGeZK8TDhP + rm /tmp/tmp.b8fP9gUWoR /tmp/tmp.PGeZK8TDhP + return 0 + kubectl_bin wait --for=delete namespace custom-users-roles-6901 ++ mktemp + local LAST_OUT=/tmp/tmp.eKlqKCjucj ++ mktemp + local LAST_ERR=/tmp/tmp.F6BLI0v7pu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace custom-users-roles-6901 namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eKlqKCjucj + cat /tmp/tmp.F6BLI0v7pu + rm /tmp/tmp.eKlqKCjucj /tmp/tmp.F6BLI0v7pu + return 0 + desc 'create namespace custom-users-roles-6901' + set +o xtrace ----------------------------------------------------------------------------------- create namespace custom-users-roles-6901 ----------------------------------------------------------------------------------- + kubectl_bin create namespace custom-users-roles-6901 ++ mktemp + local LAST_OUT=/tmp/tmp.axJkBRJEJq ++ mktemp + local LAST_ERR=/tmp/tmp.g0s5KAjaDo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace custom-users-roles-6901 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.axJkBRJEJq namespace/custom-users-roles-6901 created + cat /tmp/tmp.g0s5KAjaDo + rm /tmp/tmp.axJkBRJEJq /tmp/tmp.g0s5KAjaDo + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.v38CN9Xfsg +++ mktemp ++ local LAST_ERR=/tmp/tmp.bPWi3iN59e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v38CN9Xfsg ++ cat /tmp/tmp.bPWi3iN59e ++ rm /tmp/tmp.v38CN9Xfsg /tmp/tmp.bPWi3iN59e ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1608-f10c3c44-3-cluster5 --namespace=custom-users-roles-6901 ++ mktemp + local LAST_OUT=/tmp/tmp.FtLM7Zvb71 ++ mktemp + local LAST_ERR=/tmp/tmp.CNZidPTx26 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1608-f10c3c44-3-cluster5 --namespace=custom-users-roles-6901 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FtLM7Zvb71 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1608-f10c3c44-3-cluster5" 
modified. + cat /tmp/tmp.CNZidPTx26 + rm /tmp/tmp.FtLM7Zvb71 /tmp/tmp.CNZidPTx26 + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/conf/app-user-secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.wwlxwLghvM ++ mktemp + local LAST_ERR=/tmp/tmp.2LAgK22kB5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/conf/app-user-secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wwlxwLghvM deployment.apps/psmdb-client created secret/some-users created secret/user-one created secret/user-two created + cat /tmp/tmp.2LAgK22kB5 + rm /tmp/tmp.wwlxwLghvM /tmp/tmp.2LAgK22kB5 + return 0 + mongoUri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/conf/some-name-rs0.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/conf/some-name-rs0.yml + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1608-f10c3c44"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_OUT=/tmp/tmp.RaBEeuvqwp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + local LAST_ERR=/tmp/tmp.bigXj0678K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RaBEeuvqwp perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.bigXj0678K + rm /tmp/tmp.RaBEeuvqwp /tmp/tmp.bigXj0678K + return 0 + desc 'Check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- Check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + 
wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JALNLqDCSl +++ mktemp ++ local LAST_ERR=/tmp/tmp.8kB1aFzV64 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JALNLqDCSl ++ cat /tmp/tmp.8kB1aFzV64 ++ rm /tmp/tmp.JALNLqDCSl /tmp/tmp.8kB1aFzV64 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MsRhjp2UiN +++ mktemp ++ local LAST_ERR=/tmp/tmp.OpomRbkb6g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MsRhjp2UiN ++ cat /tmp/tmp.OpomRbkb6g ++ rm /tmp/tmp.MsRhjp2UiN /tmp/tmp.OpomRbkb6g ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.... + desc 'check user created on cluster creation' + set +o xtrace ----------------------------------------------------------------------------------- check user created on cluster creation ----------------------------------------------------------------------------------- + userOne=user-one ++ getSecretData user-one userOnePassKey ++ local secretName=user-one ++ local dataKey=userOnePassKey +++ kubectl get secrets/user-one '--template={{.data.userOnePassKey}}' +++ base64 -d ++ local data=clusterMonitor ++ echo clusterMonitor + userOnePass=clusterMonitor + compare admin 'db.getUser("user-one")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-one + local database=admin + local 'command=db.getUser("user-one")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-one + run_mongo 'use admin\n db.getUser("user-one")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n db.getUser("user-one")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LFYejPHnpQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.x0aSv2lwnC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e 
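The user-one verification being performed here can be reproduced by hand. A rough equivalent of getSecretData plus the getUser comparison, using the Secret key and connection string shown in this trace (addressing the client by its Deployment name instead of the concrete pod is a shorthand of this sketch):

# password the operator stored for user-one, straight from its Secret
kubectl get secret user-one -o jsonpath='{.data.userOnePassKey}' | base64 -d; echo    # prints "clusterMonitor" in this run
# the same user document that the compare step diffs against user-one.json
kubectl exec deploy/psmdb-client -- mongo \
    "mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false&replicaSet=rs0" \
    --quiet --eval 'db.getUser("user-one")'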
++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LFYejPHnpQ ++ cat /tmp/tmp.x0aSv2lwnC ++ rm /tmp/tmp.LFYejPHnpQ /tmp/tmp.x0aSv2lwnC ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-one")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.sfCxWXfki2 ++ mktemp + local LAST_ERR=/tmp/tmp.DyzqvcacHh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-one")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sfCxWXfki2 + cat /tmp/tmp.DyzqvcacHh + rm /tmp/tmp.sfCxWXfki2 /tmp/tmp.DyzqvcacHh + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-one.json /tmp/tmp.dSbCT5hZXv/user-one + check_mongo_auth user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 + local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jUbDwzP66U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RYEyHyqesk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jUbDwzP66U +++ cat /tmp/tmp.RYEyHyqesk +++ rm /tmp/tmp.jUbDwzP66U /tmp/tmp.RYEyHyqesk +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-l8w2c ++ local mongo_flag=--quiet ++ [[ user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WEatSJ8MgG +++ mktemp ++ local LAST_ERR=/tmp/tmp.6Ts9iVKRRH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' 
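The check_mongo_auth call in progress here simply logs in as the custom user and expects the ping to return 1. Run by hand against the first replica-set member it looks roughly like this (same Deployment-name shorthand as in the previous sketch):

kubectl exec deploy/psmdb-client -- mongo \
    "mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false&replicaSet=rs0" \
    --quiet --eval 'db.runCommand({ ping: 1 }).ok'    # prints 1 on success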
++ break ++ cat /tmp/tmp.WEatSJ8MgG ++ cat /tmp/tmp.6Ts9iVKRRH ++ rm /tmp/tmp.WEatSJ8MgG /tmp/tmp.6Ts9iVKRRH ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'delete initial user from CR and create a new one' + set +o xtrace ----------------------------------------------------------------------------------- delete initial user from CR and create a new one ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.jHW7YtYbHl ++ mktemp + local LAST_ERR=/tmp/tmp.G8X8poaF0Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jHW7YtYbHl perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.G8X8poaF0Y + rm /tmp/tmp.jHW7YtYbHl /tmp/tmp.G8X8poaF0Y + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oxNGXTDw9L +++ mktemp ++ local LAST_ERR=/tmp/tmp.tEEA93VRff ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oxNGXTDw9L ++ cat /tmp/tmp.tEEA93VRff ++ rm /tmp/tmp.oxNGXTDw9L /tmp/tmp.tEEA93VRff ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tp5GiiRlZg +++ mktemp ++ local LAST_ERR=/tmp/tmp.DqqjiboSsb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tp5GiiRlZg ++ cat /tmp/tmp.DqqjiboSsb ++ rm /tmp/tmp.Tp5GiiRlZg /tmp/tmp.DqqjiboSsb ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e 
]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-two + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-two + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iWGKTK0yCc +++ mktemp ++ local LAST_ERR=/tmp/tmp.yFbR2XjVYs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iWGKTK0yCc ++ cat /tmp/tmp.yFbR2XjVYs ++ rm /tmp/tmp.iWGKTK0yCc /tmp/tmp.yFbR2XjVYs ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jXHAgRGaGS ++ mktemp + local LAST_ERR=/tmp/tmp.86RJswXiZb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jXHAgRGaGS + cat /tmp/tmp.86RJswXiZb + rm /tmp/tmp.jXHAgRGaGS /tmp/tmp.86RJswXiZb + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-two.json /tmp/tmp.dSbCT5hZXv/user-two + userTwo=user-two ++ getSecretData user-two userTwoPassKey ++ local secretName=user-two ++ local dataKey=userTwoPassKey +++ kubectl get secrets/user-two '--template={{.data.userTwoPassKey}}' +++ base64 -d ++ local data=clusterMonitor ++ echo clusterMonitor + userTwoPass=clusterMonitor + check_mongo_auth user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 + local uri=user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach 
primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mcUxLDxG7M ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fSzlZhmJoc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mcUxLDxG7M +++ cat /tmp/tmp.fSzlZhmJoc +++ rm /tmp/tmp.mcUxLDxG7M /tmp/tmp.fSzlZhmJoc +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-l8w2c ++ local mongo_flag=--quiet ++ [[ user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q1occYyWAb +++ mktemp ++ local LAST_ERR=/tmp/tmp.5zGP7zT532 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q1occYyWAb ++ cat /tmp/tmp.5zGP7zT532 ++ rm /tmp/tmp.Q1occYyWAb /tmp/tmp.5zGP7zT532 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 + local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Sq2rvlLEvd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9MphZu4v6S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Sq2rvlLEvd +++ cat /tmp/tmp.9MphZu4v6S +++ rm /tmp/tmp.Sq2rvlLEvd /tmp/tmp.9MphZu4v6S +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-l8w2c ++ local mongo_flag=--quiet ++ [[ user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf 
'\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qOKbh6WM0a +++ mktemp ++ local LAST_ERR=/tmp/tmp.zHwwbQAHeP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qOKbh6WM0a ++ cat /tmp/tmp.zHwwbQAHeP ++ rm /tmp/tmp.qOKbh6WM0a /tmp/tmp.zHwwbQAHeP ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check password change' + set +o xtrace ----------------------------------------------------------------------------------- check password change ----------------------------------------------------------------------------------- + userTwoNewPass=new-user-two-password ++ base64 ++ echo -n new-user-two-password + patch_secret user-two userTwoPassKey bmV3LXVzZXItdHdvLXBhc3N3b3Jk + local secret=user-two + local key=userTwoPassKey + local value=bmV3LXVzZXItdHdvLXBhc3N3b3Jk + kubectl patch secret user-two '-p={"data":{"userTwoPassKey": "bmV3LXVzZXItdHdvLXBhc3N3b3Jk"}}' secret/user-two patched + sleep 20 + check_mongo_auth user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 + local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4gw1j3YhD4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EllLo4yuUg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4gw1j3YhD4 +++ cat /tmp/tmp.EllLo4yuUg +++ rm /tmp/tmp.4gw1j3YhD4 /tmp/tmp.EllLo4yuUg +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-l8w2c ++ local mongo_flag=--quiet ++ [[ user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yYpdOJFcjA +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.Qbvysrw54z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yYpdOJFcjA ++ cat /tmp/tmp.Qbvysrw54z ++ rm /tmp/tmp.yYpdOJFcjA /tmp/tmp.Qbvysrw54z ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check user roles update from CR' + set +o xtrace ----------------------------------------------------------------------------------- check user roles update from CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.fFrrCjkxWz ++ mktemp + local LAST_ERR=/tmp/tmp.zQCxH3YytM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fFrrCjkxWz perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.zQCxH3YytM + rm /tmp/tmp.fFrrCjkxWz /tmp/tmp.zQCxH3YytM + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e7RN36w2G8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.k9x8xjGEjr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e7RN36w2G8 ++ cat /tmp/tmp.k9x8xjGEjr ++ rm /tmp/tmp.e7RN36w2G8 /tmp/tmp.k9x8xjGEjr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q21j5jHnhd +++ mktemp ++ local LAST_ERR=/tmp/tmp.JatYWwwWM0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get 
psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q21j5jHnhd ++ cat /tmp/tmp.JatYWwwWM0 ++ rm /tmp/tmp.q21j5jHnhd /tmp/tmp.JatYWwwWM0 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-two-update-roles + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gHoPgBp9bO +++ mktemp + sed '/"userId"/d' ++ local LAST_ERR=/tmp/tmp.0xlGCdv58l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gHoPgBp9bO ++ cat /tmp/tmp.0xlGCdv58l ++ rm /tmp/tmp.gHoPgBp9bO /tmp/tmp.0xlGCdv58l ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Q7jze7B9Ft ++ mktemp + local LAST_ERR=/tmp/tmp.Fmm4AXibM7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q7jze7B9Ft + cat /tmp/tmp.Fmm4AXibM7 + rm /tmp/tmp.Q7jze7B9Ft /tmp/tmp.Fmm4AXibM7 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.dSbCT5hZXv/user-two-update-roles + desc 'check user roles update from DB' + set +o xtrace ----------------------------------------------------------------------------------- check user roles update from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local 'command=use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' + local 
uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oj7CPXkeWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.u1Cwq7xHGI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Oj7CPXkeWa ++ cat /tmp/tmp.u1Cwq7xHGI ++ rm /tmp/tmp.Oj7CPXkeWa /tmp/tmp.u1Cwq7xHGI ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.X4WeHdJ1Cq ++ mktemp + local LAST_ERR=/tmp/tmp.kqYfESHx0o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X4WeHdJ1Cq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c67afe02-0863-4503-9530-d71c8e9a6517") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin bye + cat /tmp/tmp.kqYfESHx0o + rm /tmp/tmp.X4WeHdJ1Cq /tmp/tmp.kqYfESHx0o + return 0 + sleep 15 + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-two-update-roles + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nLJ2eKCBKp +++ mktemp ++ local LAST_ERR=/tmp/tmp.KRWXcRTrTy ++ local exit_status=0 ++ 
local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nLJ2eKCBKp ++ cat /tmp/tmp.KRWXcRTrTy ++ rm /tmp/tmp.nLJ2eKCBKp /tmp/tmp.KRWXcRTrTy ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.39eHD887eu ++ mktemp + local LAST_ERR=/tmp/tmp.j6Vwz9HYSI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.39eHD887eu + cat /tmp/tmp.j6Vwz9HYSI + rm /tmp/tmp.39eHD887eu /tmp/tmp.j6Vwz9HYSI + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.dSbCT5hZXv/user-two-update-roles + desc 'check user recreated after deleted from DB' + set +o xtrace ----------------------------------------------------------------------------------- check user recreated after deleted from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.dropUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local 'command=use admin\n db.dropUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BQKJ3PX7ob +++ mktemp ++ local LAST_ERR=/tmp/tmp.KeeCMmnJyZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BQKJ3PX7ob ++ cat /tmp/tmp.KeeCMmnJyZ ++ rm /tmp/tmp.BQKJ3PX7ob /tmp/tmp.KeeCMmnJyZ ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.dropUser("user-two")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.rxoSx9EXCo ++ mktemp + local LAST_ERR=/tmp/tmp.kw1eD4ia1V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.dropUser("user-two")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.rxoSx9EXCo Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ced9e023-5a5d-447d-999b-ba7b7973a3f1") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin true bye + cat /tmp/tmp.kw1eD4ia1V + rm /tmp/tmp.rxoSx9EXCo /tmp/tmp.kw1eD4ia1V + return 0 + sleep 15 + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-two-update-roles + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MkWFBOp3nw +++ mktemp ++ local LAST_ERR=/tmp/tmp.COTEygPgpA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MkWFBOp3nw ++ cat /tmp/tmp.COTEygPgpA ++ rm /tmp/tmp.MkWFBOp3nw /tmp/tmp.COTEygPgpA ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wWuLRKBB00 ++ mktemp + local LAST_ERR=/tmp/tmp.F6vEAzSMIY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wWuLRKBB00 + cat /tmp/tmp.F6vEAzSMIY + rm /tmp/tmp.wWuLRKBB00 /tmp/tmp.F6vEAzSMIY + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.dSbCT5hZXv/user-two-update-roles + desc 'check new user created after updated user name via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new user created after updated user 
name via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.QVO2GjJDOS ++ mktemp + local LAST_ERR=/tmp/tmp.r1lOakFOjq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QVO2GjJDOS perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.r1lOakFOjq + rm /tmp/tmp.QVO2GjJDOS /tmp/tmp.r1lOakFOjq + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nDvAXXRp1q +++ mktemp ++ local LAST_ERR=/tmp/tmp.p4eNUUbDxu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nDvAXXRp1q ++ cat /tmp/tmp.p4eNUUbDxu ++ rm /tmp/tmp.nDvAXXRp1q /tmp/tmp.p4eNUUbDxu ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AxR9sFeXfi +++ mktemp ++ local LAST_ERR=/tmp/tmp.On4WRMkDuA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AxR9sFeXfi ++ cat /tmp/tmp.On4WRMkDuA ++ rm /tmp/tmp.AxR9sFeXfi /tmp/tmp.On4WRMkDuA ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-three-admin-db + local database=admin + local 'command=db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-three-admin-db + run_mongo 'use admin\n db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local 
suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JaN6tII2fT +++ mktemp ++ local LAST_ERR=/tmp/tmp.j71UU9oqhU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JaN6tII2fT ++ cat /tmp/tmp.j71UU9oqhU ++ rm /tmp/tmp.JaN6tII2fT /tmp/tmp.j71UU9oqhU ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.mGeZal0URv ++ mktemp + local LAST_ERR=/tmp/tmp.Wk4Lk4t33h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mGeZal0URv + cat /tmp/tmp.Wk4Lk4t33h + rm /tmp/tmp.mGeZal0URv /tmp/tmp.Wk4Lk4t33h + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-three-admin-db.json /tmp/tmp.dSbCT5hZXv/user-three-admin-db + compare admin 'db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-two-update-roles + local database=admin + local 'command=db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-two-update-roles + run_mongo 'use admin\n db.getUser("user-two")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n db.getUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.higImMre5M +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lauqg0PQKm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.higImMre5M ++ cat /tmp/tmp.Lauqg0PQKm ++ rm /tmp/tmp.higImMre5M /tmp/tmp.Lauqg0PQKm ++ return 0 + local 
client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.rK2vnD8YVB ++ mktemp + local LAST_ERR=/tmp/tmp.4AuxLMjFZB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rK2vnD8YVB + cat /tmp/tmp.4AuxLMjFZB + rm /tmp/tmp.rK2vnD8YVB /tmp/tmp.4AuxLMjFZB + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-two-update-roles.json /tmp/tmp.dSbCT5hZXv/user-two-update-roles + check_mongo_auth user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 + local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 mongodb '' --quiet ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wSTN9PKD1E ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1ruxsRRfPp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wSTN9PKD1E +++ cat /tmp/tmp.1ruxsRRfPp +++ rm /tmp/tmp.wSTN9PKD1E /tmp/tmp.1ruxsRRfPp +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-l8w2c ++ local mongo_flag=--quiet ++ [[ user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CIwAkXDEQt +++ mktemp ++ local LAST_ERR=/tmp/tmp.RQ98bAnG77 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CIwAkXDEQt ++ cat /tmp/tmp.RQ98bAnG77 ++ rm 
/tmp/tmp.CIwAkXDEQt /tmp/tmp.RQ98bAnG77 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 + local uri=user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 ++ local driver=mongodb ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gXUdgawaG4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P7Bmb5MzE6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gXUdgawaG4 +++ cat /tmp/tmp.P7Bmb5MzE6 +++ rm /tmp/tmp.gXUdgawaG4 /tmp/tmp.P7Bmb5MzE6 +++ return 0 ++ local client_container=psmdb-client-6c585f8dbd-l8w2c ++ local mongo_flag=--quiet ++ [[ user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fwodr1Z5jI +++ mktemp ++ local LAST_ERR=/tmp/tmp.wlLE06a7Kq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-three:new-user-two-password@some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fwodr1Z5jI ++ cat /tmp/tmp.wlLE06a7Kq ++ rm /tmp/tmp.Fwodr1Z5jI /tmp/tmp.wlLE06a7Kq ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check new user created after updated user db via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new user created after updated user db via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"newDb", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local 
LAST_OUT=/tmp/tmp.7cEEKZamxd ++ mktemp + local LAST_ERR=/tmp/tmp.YbMIsqsuQ8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"newDb", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7cEEKZamxd perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.YbMIsqsuQ8 + rm /tmp/tmp.7cEEKZamxd /tmp/tmp.YbMIsqsuQ8 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.in9zXMWxce +++ mktemp ++ local LAST_ERR=/tmp/tmp.YMxLyJvVHp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.in9zXMWxce ++ cat /tmp/tmp.YMxLyJvVHp ++ rm /tmp/tmp.in9zXMWxce /tmp/tmp.YMxLyJvVHp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4I8UjGCOfl +++ mktemp ++ local LAST_ERR=/tmp/tmp.vY365prltP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4I8UjGCOfl ++ cat /tmp/tmp.vY365prltP ++ rm /tmp/tmp.4I8UjGCOfl /tmp/tmp.vY365prltP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare newDb 'db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-three-newDb-db + local database=newDb + local 'command=db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-three-newDb-db + run_mongo 'use newDb\n db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use newDb\n db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a5PlOZbGSo +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xw6BkDfQq8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a5PlOZbGSo ++ cat /tmp/tmp.Xw6BkDfQq8 ++ rm /tmp/tmp.a5PlOZbGSo /tmp/tmp.Xw6BkDfQq8 ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use newDb\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.o75T8crr8b ++ mktemp + local LAST_ERR=/tmp/tmp.zk2X7dzcvo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use newDb\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o75T8crr8b + cat /tmp/tmp.zk2X7dzcvo + rm /tmp/tmp.o75T8crr8b /tmp/tmp.zk2X7dzcvo + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-three-newDb-db.json /tmp/tmp.dSbCT5hZXv/user-three-newDb-db + compare admin 'db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-three-admin-db + local database=admin + local 'command=db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-three-admin-db + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + sed '/"userId"/d' + run_mongo 'use admin\n db.getUser("user-three")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-three")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JK0AuvEbz6 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' +++ mktemp ++ local LAST_ERR=/tmp/tmp.vunwOO14Zm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JK0AuvEbz6 ++ cat /tmp/tmp.vunwOO14Zm ++ rm /tmp/tmp.JK0AuvEbz6 /tmp/tmp.vunwOO14Zm ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.srTjLoXwJW ++ mktemp + local LAST_ERR=/tmp/tmp.HoFESGH5e9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-three")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.srTjLoXwJW + cat /tmp/tmp.HoFESGH5e9 + rm /tmp/tmp.srTjLoXwJW /tmp/tmp.HoFESGH5e9 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-three-admin-db.json /tmp/tmp.dSbCT5hZXv/user-three-admin-db + desc 'check new user created with default db and secret password key' + set +o xtrace ----------------------------------------------------------------------------------- check new user created with default db and secret password key ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-four", "passwordSecretRef": { "name": "user-two" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.M1KbLH0F0Z ++ mktemp + local LAST_ERR=/tmp/tmp.wcGbvsNBTp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-four", "passwordSecretRef": { "name": "user-two" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M1KbLH0F0Z perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.wcGbvsNBTp + rm /tmp/tmp.M1KbLH0F0Z /tmp/tmp.wcGbvsNBTp + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eIj3ZrgxzL +++ mktemp ++ local LAST_ERR=/tmp/tmp.VbpFzHXvvJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eIj3ZrgxzL ++ cat /tmp/tmp.VbpFzHXvvJ ++ rm /tmp/tmp.eIj3ZrgxzL /tmp/tmp.VbpFzHXvvJ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RJUBb6iN7t +++ mktemp ++ local LAST_ERR=/tmp/tmp.E7p63lTBf1 ++ local exit_status=0 ++ local timeout=4 +++ 
seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RJUBb6iN7t ++ cat /tmp/tmp.E7p63lTBf1 ++ rm /tmp/tmp.RJUBb6iN7t /tmp/tmp.E7p63lTBf1 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getUser("user-four")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-four + local database=admin + local 'command=db.getUser("user-four")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-four + sed '/"userId"/d' + run_mongo 'use admin\n db.getUser("user-four")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getUser("user-four")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.McdnOQlsoF +++ mktemp ++ local LAST_ERR=/tmp/tmp.rMwtvk9k2K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.McdnOQlsoF ++ cat /tmp/tmp.rMwtvk9k2K ++ rm /tmp/tmp.McdnOQlsoF /tmp/tmp.rMwtvk9k2K ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-four")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IJVmm1Iwij ++ mktemp + local LAST_ERR=/tmp/tmp.dl4DZOgZEo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getUser("user-four")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IJVmm1Iwij + cat /tmp/tmp.dl4DZOgZEo + rm /tmp/tmp.IJVmm1Iwij /tmp/tmp.dl4DZOgZEo + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-four.json /tmp/tmp.dSbCT5hZXv/user-four + desc 'check user role on cluster initialization' + set +o xtrace ----------------------------------------------------------------------------------- check user role on cluster initialization ----------------------------------------------------------------------------------- + compare admin 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-one + local database=admin + local 'command=db.getRole("role-one", {showPrivileges: true, 
showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-one + run_mongo 'use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + sed '/"userId"/d' ++ local LAST_OUT=/tmp/tmp.5jiAszfbBF +++ mktemp ++ local LAST_ERR=/tmp/tmp.iIO9V3qAhn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5jiAszfbBF ++ cat /tmp/tmp.iIO9V3qAhn ++ rm /tmp/tmp.5jiAszfbBF /tmp/tmp.iIO9V3qAhn ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0O2UAAjXlf ++ mktemp + local LAST_ERR=/tmp/tmp.u7F7ebOVBF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0O2UAAjXlf + cat /tmp/tmp.u7F7ebOVBF + rm /tmp/tmp.0O2UAAjXlf /tmp/tmp.u7F7ebOVBF + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-one.json /tmp/tmp.dSbCT5hZXv/role-one + desc 'check role recreated after deleted from DB' + set +o xtrace ----------------------------------------------------------------------------------- check role recreated after deleted from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.dropRole("role-one")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local 'command=use admin\n db.dropRole("role-one")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TOYEDnf2EC +++ mktemp ++ local LAST_ERR=/tmp/tmp.xo4XFWlj5D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e 
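In plain terms, the step beginning above drops the operator-managed role "role-one" directly in the admin database and then expects the operator to recreate it: the test sleeps about 15 seconds for reconciliation and diffs the db.getRole() output against compare/role-one.json. A minimal manual equivalent of that check is sketched below, reusing the client pod, namespace, and userAdmin credentials recorded in this log; the -n flag is an addition here, since the log itself relies on whatever namespace the current kubectl context points at.

# drop the role the operator manages (the test does this via run_mongo over mongodb+srv)
kubectl -n custom-users-roles-6901 exec psmdb-client-6c585f8dbd-l8w2c -- bash -c \
  'printf "use admin\n db.dropRole(\"role-one\")\n" | mongo "mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'
# give the operator's reconcile loop time to notice the missing role and restore it
sleep 15
# the role should be back with its original privileges; the e2e test diffs this output against compare/role-one.json
kubectl -n custom-users-roles-6901 exec psmdb-client-6c585f8dbd-l8w2c -- bash -c \
  'printf "use admin\n db.getRole(\"role-one\", {showPrivileges: true, showAuthenticationRestrictions: true})\n" | mongo "mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'

The same drop-and-wait pattern appears elsewhere in this log for users (db.dropUser("user-two")) and for role changes made directly in the database (db.updateRole on "role-two"), with the operator expected to converge the database back to what spec.users and spec.roles declare.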
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TOYEDnf2EC ++ cat /tmp/tmp.xo4XFWlj5D ++ rm /tmp/tmp.TOYEDnf2EC /tmp/tmp.xo4XFWlj5D ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.dropRole("role-one")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TS6eKBjXR2 ++ mktemp + local LAST_ERR=/tmp/tmp.hhnzJa8sPC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.dropRole("role-one")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TS6eKBjXR2 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("5e4dbaa2-f861-483f-b42a-b1de1279cf59") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin true bye + cat /tmp/tmp.hhnzJa8sPC + rm /tmp/tmp.TS6eKBjXR2 /tmp/tmp.hhnzJa8sPC + return 0 + sleep 15 + compare admin 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-one + local database=admin + local 'command=db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-one + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' + run_mongo 'use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w4dMH30UAz +++ mktemp ++ local LAST_ERR=/tmp/tmp.9wDzbcRIzy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w4dMH30UAz ++ cat /tmp/tmp.9wDzbcRIzy ++ rm /tmp/tmp.w4dMH30UAz 
/tmp/tmp.9wDzbcRIzy ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.diNfNuC6oK ++ mktemp + local LAST_ERR=/tmp/tmp.PhlGiptuft + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.diNfNuC6oK + cat /tmp/tmp.PhlGiptuft + rm /tmp/tmp.diNfNuC6oK /tmp/tmp.PhlGiptuft + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-one.json /tmp/tmp.dSbCT5hZXv/role-one + desc 'delete initial role from CR and create a new one' + set +o xtrace ----------------------------------------------------------------------------------- delete initial role from CR and create a new one ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ], "roles": [ { "role": "read", "db": "admin" } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.UxG8jqYpll ++ mktemp + local LAST_ERR=/tmp/tmp.xBMvlNV0Tr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ], "roles": [ { "role": "read", "db": "admin" } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UxG8jqYpll perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.xBMvlNV0Tr + rm /tmp/tmp.UxG8jqYpll /tmp/tmp.xBMvlNV0Tr + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7xXK4LtafI +++ mktemp ++ local LAST_ERR=/tmp/tmp.NT6P07ZnXn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 
-a -n 1 ']' ++ break ++ cat /tmp/tmp.7xXK4LtafI ++ cat /tmp/tmp.NT6P07ZnXn ++ rm /tmp/tmp.7xXK4LtafI /tmp/tmp.NT6P07ZnXn ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TYv1uRStU1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5n2iyzUiQj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TYv1uRStU1 ++ cat /tmp/tmp.5n2iyzUiQj ++ rm /tmp/tmp.TYv1uRStU1 /tmp/tmp.5n2iyzUiQj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-one + local database=admin + local 'command=db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-one + run_mongo 'use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + sed '/"userId"/d' ++ local LAST_OUT=/tmp/tmp.wOHwN4rrFT +++ mktemp ++ local LAST_ERR=/tmp/tmp.sjLF3YYDsE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wOHwN4rrFT ++ cat /tmp/tmp.sjLF3YYDsE ++ rm /tmp/tmp.wOHwN4rrFT /tmp/tmp.sjLF3YYDsE ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qdfe1YzeeI ++ mktemp + local LAST_ERR=/tmp/tmp.M84CyPhNOq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qdfe1YzeeI + cat /tmp/tmp.M84CyPhNOq + rm /tmp/tmp.qdfe1YzeeI /tmp/tmp.M84CyPhNOq + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-one.json /tmp/tmp.dSbCT5hZXv/role-one + compare admin 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-two + local database=admin + local 'command=db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-two + run_mongo 'use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zIDyC5yVWS +++ mktemp ++ local LAST_ERR=/tmp/tmp.u27t5zswCZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zIDyC5yVWS ++ cat /tmp/tmp.u27t5zswCZ ++ rm /tmp/tmp.zIDyC5yVWS /tmp/tmp.u27t5zswCZ ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.WKQrp5Unar ++ mktemp + local LAST_ERR=/tmp/tmp.PuwdbnzUEV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WKQrp5Unar + cat /tmp/tmp.PuwdbnzUEV + rm /tmp/tmp.WKQrp5Unar /tmp/tmp.PuwdbnzUEV + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-two.json /tmp/tmp.dSbCT5hZXv/role-two + desc 'check role update from CR' + set +o xtrace ----------------------------------------------------------------------------------- check role update from CR 
----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.UPFN7YxFbq ++ mktemp + local LAST_ERR=/tmp/tmp.JItDiZizEO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UPFN7YxFbq perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.JItDiZizEO + rm /tmp/tmp.UPFN7YxFbq /tmp/tmp.JItDiZizEO + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ydLjncfOiD +++ mktemp ++ local LAST_ERR=/tmp/tmp.IUaJejxLu0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ydLjncfOiD ++ cat /tmp/tmp.IUaJejxLu0 ++ rm /tmp/tmp.ydLjncfOiD /tmp/tmp.IUaJejxLu0 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YgNx2kqqdU +++ mktemp ++ local LAST_ERR=/tmp/tmp.vOxHA3TiMq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YgNx2kqqdU ++ cat /tmp/tmp.vOxHA3TiMq ++ rm /tmp/tmp.YgNx2kqqdU /tmp/tmp.vOxHA3TiMq ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-two-updated + local database=admin + local 'command=db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-two-updated + run_mongo 'use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getRole("role-two", {showPrivileges: 
true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yLh90RsuvC +++ mktemp ++ local LAST_ERR=/tmp/tmp.prUGC4qPGh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yLh90RsuvC ++ cat /tmp/tmp.prUGC4qPGh ++ rm /tmp/tmp.yLh90RsuvC /tmp/tmp.prUGC4qPGh ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.h2ZFmhhpIM ++ mktemp + local LAST_ERR=/tmp/tmp.1ORG0JEsV9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h2ZFmhhpIM + cat /tmp/tmp.1ORG0JEsV9 + rm /tmp/tmp.h2ZFmhhpIM /tmp/tmp.1ORG0JEsV9 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-two-updated.json /tmp/tmp.dSbCT5hZXv/role-two-updated + desc 'check role update from DB' + set +o xtrace ----------------------------------------------------------------------------------- check role update from DB ----------------------------------------------------------------------------------- + run_mongo 'use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local 'command=use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zah4ZmqNgl +++ mktemp ++ local LAST_ERR=/tmp/tmp.PJIbcOiqXc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zah4ZmqNgl ++ cat /tmp/tmp.PJIbcOiqXc ++ rm /tmp/tmp.zah4ZmqNgl 
/tmp/tmp.PJIbcOiqXc ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DJPC13x5t3 ++ mktemp + local LAST_ERR=/tmp/tmp.OyQQ36UbEt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DJPC13x5t3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.custom-users-roles-6901.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e10cd797-1712-4371-ad05-2f48ccb5268a") } Percona Server for MongoDB server version: v7.0.14-8 WARNING: shell and server versions do not match switched to db admin bye + cat /tmp/tmp.OyQQ36UbEt + rm /tmp/tmp.DJPC13x5t3 /tmp/tmp.OyQQ36UbEt + return 0 + sleep 15 + compare admin 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-two-updated + local database=admin + local 'command=db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-two-updated + run_mongo 'use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cyH9m2XwJ5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.diISosvoFq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 + sed '/"userId"/d' ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cyH9m2XwJ5 ++ cat /tmp/tmp.diISosvoFq ++ rm /tmp/tmp.cyH9m2XwJ5 /tmp/tmp.diISosvoFq ++ return 0 + local 
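# Sketch (not part of the captured output): db.updateRole() replaces every field it is given,
# so the privileges array sent above overwrites role-two's previous privileges rather than
# appending to them; the 15 second sleep is presumably there to let the operator reconcile the
# manual change before the role is read back and diffed against the same golden file. A manual
# re-check through the same client pod could look like:
kubectl -n custom-users-roles-6901 exec psmdb-client-6c585f8dbd-l8w2c -- \
  mongo "mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false&replicaSet=rs0" \
  --quiet --eval 'printjson(db.getRole("role-two", {showPrivileges: true}))'
# The write above also went through a mongodb+srv:// URI, which resolves the replica-set
# members from DNS SRV records of the headless service instead of listing hosts explicitly.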
client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hchSstB6rn ++ mktemp + local LAST_ERR=/tmp/tmp.6delixofbJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hchSstB6rn + cat /tmp/tmp.6delixofbJ + rm /tmp/tmp.hchSstB6rn /tmp/tmp.6delixofbJ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-two-updated.json /tmp/tmp.dSbCT5hZXv/role-two-updated + desc 'check new role created after updated role name via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new role created after updated role name via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-three", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.uYOP2akmHQ ++ mktemp + local LAST_ERR=/tmp/tmp.CEnqVQ5AjI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-three", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uYOP2akmHQ perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.CEnqVQ5AjI + rm /tmp/tmp.uYOP2akmHQ /tmp/tmp.CEnqVQ5AjI + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TP9f1d0khZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.bwc6hXyyVF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TP9f1d0khZ ++ cat /tmp/tmp.bwc6hXyyVF ++ rm /tmp/tmp.TP9f1d0khZ 
/tmp/tmp.bwc6hXyyVF ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JuynAfIPfN +++ mktemp ++ local LAST_ERR=/tmp/tmp.qGU0SvQzCZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JuynAfIPfN ++ cat /tmp/tmp.qGU0SvQzCZ ++ rm /tmp/tmp.JuynAfIPfN /tmp/tmp.qGU0SvQzCZ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare admin 'db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-three + local database=admin + local 'command=db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-three + sed '/"userId"/d' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XLQT5DLSZc +++ mktemp ++ local LAST_ERR=/tmp/tmp.JDtqy4ibQf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XLQT5DLSZc ++ cat /tmp/tmp.JDtqy4ibQf ++ rm /tmp/tmp.XLQT5DLSZc /tmp/tmp.JDtqy4ibQf ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8UIZOPlF0L ++ mktemp + local LAST_ERR=/tmp/tmp.y2Zl2MmsJe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use admin\n db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
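# Sketch (not part of the captured output): the merge patch above replaced the whole spec.roles
# list with a single "role-three" entry, and the golden-file diff below only asserts that the
# new role exists. A manual spot check through the same client pod could also look at whether
# the old name is still present, which this run does not assert:
kubectl -n custom-users-roles-6901 exec psmdb-client-6c585f8dbd-l8w2c -- \
  mongo "mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false&replicaSet=rs0" \
  --quiet --eval 'printjson(db.getRole("role-three", {showPrivileges: true})); printjson(db.getRole("role-two"))'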
break + cat /tmp/tmp.8UIZOPlF0L + cat /tmp/tmp.y2Zl2MmsJe + rm /tmp/tmp.8UIZOPlF0L /tmp/tmp.y2Zl2MmsJe + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-three.json /tmp/tmp.dSbCT5hZXv/role-three + desc 'check creating multiple roles and the users in a single CR apply' + set +o xtrace ----------------------------------------------------------------------------------- check creating multiple roles and the users in a single CR apply ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": { "roles": [ { "role": "role-four", "db": "testAdmin1", "privileges": [ { "resource": { "db": "testAdmin1", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin1", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin1", "collection": "system.version" }, "actions": [ "find" ] } ] }, { "role": "role-five", "db": "testAdmin2", "privileges": [ { "resource": { "db": "testAdmin2", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin2", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin2", "collection": "system.version" }, "actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", "db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.snCeryeSPH ++ mktemp + local LAST_ERR=/tmp/tmp.n9rFpnV1eJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": { "roles": [ { "role": "role-four", "db": "testAdmin1", "privileges": [ { "resource": { "db": "testAdmin1", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin1", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin1", "collection": "system.version" }, "actions": [ "find" ] } ] }, { "role": "role-five", "db": "testAdmin2", "privileges": [ { "resource": { "db": "testAdmin2", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin2", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin2", "collection": "system.version" }, "actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", "db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.snCeryeSPH perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.n9rFpnV1eJ + rm /tmp/tmp.snCeryeSPH /tmp/tmp.n9rFpnV1eJ + return 0 + 
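# Sketch (not part of the captured output): once the patch above is reconciled, the two new
# users could be smoke-tested by authenticating against their db ("testAdmin") with the password
# taken from the user-one secret referenced by passwordSecretRef. The secret name, key, pod and
# namespace below come from this run; the check itself was not executed here, and the password
# may need URL-encoding if it contains reserved characters:
pass=$(kubectl -n custom-users-roles-6901 get secret user-one -o jsonpath='{.data.userOnePassKey}' | base64 -d)
kubectl -n custom-users-roles-6901 exec psmdb-client-6c585f8dbd-l8w2c -- \
  mongo "mongodb://user-five:${pass}@some-name-rs0.custom-users-roles-6901.svc.cluster.local/testAdmin?ssl=false&replicaSet=rs0" \
  --quiet --eval 'printjson(db.runCommand({connectionStatus: 1}).authInfo)'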
wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7KEDbt4vHA +++ mktemp ++ local LAST_ERR=/tmp/tmp.rCJ8tGuo0S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7KEDbt4vHA ++ cat /tmp/tmp.rCJ8tGuo0S ++ rm /tmp/tmp.7KEDbt4vHA /tmp/tmp.rCJ8tGuo0S ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aRzoHd7foG +++ mktemp ++ local LAST_ERR=/tmp/tmp.iqBWLxWE9n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aRzoHd7foG ++ cat /tmp/tmp.iqBWLxWE9n ++ rm /tmp/tmp.aRzoHd7foG /tmp/tmp.iqBWLxWE9n ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare testAdmin1 'db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-four + local database=testAdmin1 + local 'command=db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-four + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + sed '/"userId"/d' + local 'command=use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pBZXFrrZ0A +++ mktemp ++ local LAST_ERR=/tmp/tmp.PRgqJ4XMYd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ 
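# Sketch (not executed in this run): the compare helper used throughout this test reduces to
# "run the query through the client pod, strip volatile output, diff against a golden file".
# Assuming the raw mongo shell output had been saved to role-four.raw, the normalization applied
# above is roughly:
egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' role-four.raw \
  | sed '/"userId"/d' \
  | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+\.svc/-xxxesvc/' \
  > role-four.normalized
diff e2e-tests/custom-users-roles/compare/role-four.json role-four.normalized
# A zero exit status from diff is what lets the test proceed; any drift in the role document
# shows up as a diff in the log.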
break ++ cat /tmp/tmp.pBZXFrrZ0A ++ cat /tmp/tmp.PRgqJ4XMYd ++ rm /tmp/tmp.pBZXFrrZ0A /tmp/tmp.PRgqJ4XMYd ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fhMXVs3dNR ++ mktemp + local LAST_ERR=/tmp/tmp.aKBismN2j7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin1\n db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fhMXVs3dNR + cat /tmp/tmp.aKBismN2j7 + rm /tmp/tmp.fhMXVs3dNR /tmp/tmp.aKBismN2j7 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-four.json /tmp/tmp.dSbCT5hZXv/role-four + compare testAdmin2 'db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 role-five + local database=testAdmin2 + local 'command=db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=role-five + run_mongo 'use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + sed '/"userId"/d' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dn0FkQU2xI +++ mktemp ++ local LAST_ERR=/tmp/tmp.FVS71eoWCF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Dn0FkQU2xI ++ cat /tmp/tmp.FVS71eoWCF ++ rm /tmp/tmp.Dn0FkQU2xI /tmp/tmp.FVS71eoWCF ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DDj3dNha0b ++ mktemp + local LAST_ERR=/tmp/tmp.tUDZdVxPuK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin2\n db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DDj3dNha0b + cat /tmp/tmp.tUDZdVxPuK + rm /tmp/tmp.DDj3dNha0b /tmp/tmp.tUDZdVxPuK + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/role-five.json /tmp/tmp.dSbCT5hZXv/role-five + compare testAdmin 'db.getUser("user-five")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-five + local database=testAdmin + local 'command=db.getUser("user-five")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-five + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + run_mongo 'use testAdmin\n db.getUser("user-five")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use testAdmin\n db.getUser("user-five")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7RgjYM3wLC +++ mktemp ++ local LAST_ERR=/tmp/tmp.STl4MPXO0D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7RgjYM3wLC ++ cat /tmp/tmp.STl4MPXO0D ++ rm /tmp/tmp.7RgjYM3wLC /tmp/tmp.STl4MPXO0D ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin\n db.getUser("user-five")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GASCUd6JNy ++ mktemp + local LAST_ERR=/tmp/tmp.8MXtm8DdPP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin\n db.getUser("user-five")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GASCUd6JNy + cat /tmp/tmp.8MXtm8DdPP + rm /tmp/tmp.GASCUd6JNy /tmp/tmp.8MXtm8DdPP + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-five.json /tmp/tmp.dSbCT5hZXv/user-five + compare 
testAdmin 'db.getUser("user-six")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 user-six + local database=testAdmin + local 'command=db.getUser("user-six")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local target=user-six + run_mongo 'use testAdmin\n db.getUser("user-six")' userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 mongodb + local 'command=use testAdmin\n db.getUser("user-six")' + local uri=userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + sed '/"userId"/d' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EkGfOMCcsQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.sf50mc9Ztk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EkGfOMCcsQ ++ cat /tmp/tmp.sf50mc9Ztk ++ rm /tmp/tmp.EkGfOMCcsQ /tmp/tmp.sf50mc9Ztk ++ return 0 + local client_container=psmdb-client-6c585f8dbd-l8w2c + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin\n db.getUser("user-six")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.G2huvMbdjf ++ mktemp + local LAST_ERR=/tmp/tmp.3YpeKzyzwr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6c585f8dbd-l8w2c -- bash -c 'printf '\''use testAdmin\n db.getUser("user-six")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0.custom-users-roles-6901.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G2huvMbdjf + cat /tmp/tmp.3YpeKzyzwr + rm /tmp/tmp.G2huvMbdjf /tmp/tmp.3YpeKzyzwr + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/e2e-tests/custom-users-roles/compare/user-six.json /tmp/tmp.dSbCT5hZXv/user-six + destroy custom-users-roles-6901 + local namespace=custom-users-roles-6901 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.weSCBJMVzs ++ mktemp + local LAST_ERR=/tmp/tmp.ZCnvX4VFjs + local exit_status=0 + local timeout=4 ++ seq 0 2 + 
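# Sketch (not from this run): the delete_crd teardown below removes finalizers from any leftover
# custom resources before waiting for the CRDs to disappear, so a stuck resource cannot block
# deletion. Done by hand for one stuck backup it would look like this (resource and namespace
# names are placeholders):
kubectl patch perconaservermongodbbackups.psmdb.percona.com some-backup -n some-namespace \
  --type=merge -p '{"metadata":{"finalizers":[]}}'
kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com --timeout=60s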
for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.weSCBJMVzs customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.ZCnvX4VFjs + rm /tmp/tmp.weSCBJMVzs /tmp/tmp.ZCnvX4VFjs + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.l25kkn6bB6 ++ mktemp + local LAST_ERR=/tmp/tmp.DXTbXHHBfg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l25kkn6bB6 + cat /tmp/tmp.DXTbXHHBfg + rm /tmp/tmp.l25kkn6bB6 /tmp/tmp.DXTbXHHBfg + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.mJ0xjjJT0M ++ mktemp + local LAST_ERR=/tmp/tmp.lsSPUo0sqZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mJ0xjjJT0M + cat /tmp/tmp.lsSPUo0sqZ + rm /tmp/tmp.mJ0xjjJT0M /tmp/tmp.lsSPUo0sqZ + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the 
server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4IVqWYhtYM ++ mktemp + local LAST_ERR=/tmp/tmp.pD3AdwQTKJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4IVqWYhtYM + cat /tmp/tmp.pD3AdwQTKJ + rm /tmp/tmp.4IVqWYhtYM /tmp/tmp.pD3AdwQTKJ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.zZ3XWhYQvi ++ mktemp + local LAST_ERR=/tmp/tmp.8bCy7bQ5aq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1608/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zZ3XWhYQvi clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.8bCy7bQ5aq + rm /tmp/tmp.zZ3XWhYQvi /tmp/tmp.8bCy7bQ5aq + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.QbzIudX22E ++ mktemp + local LAST_ERR=/tmp/tmp.INUgOgm37o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.QbzIudX22E + cat /tmp/tmp.INUgOgm37o Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.QbzIudX22E + cat /tmp/tmp.INUgOgm37o Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": 
serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io 
"cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.QbzIudX22E + cat /tmp/tmp.INUgOgm37o Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.QbzIudX22E + cat /tmp/tmp.INUgOgm37o Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.QbzIudX22E /tmp/tmp.INUgOgm37o + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace custom-users-roles-6901 + rm -rf /tmp/tmp.dSbCT5hZXv + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.uG2tlA46LV ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed++ mktemp ----------------------------------------------------------------------------------- + local LAST_ERR=/tmp/tmp.mLB1wLFR6f + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.oaDKaAgIxG + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace custom-users-roles-6901 ++ mktemp + local LAST_ERR=/tmp/tmp.PIiyczFiNI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator