Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/logs/custom-users-roles-sharded.log grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=some-name + create_infra custom-users-roles-sharded-29269 + local ns=custom-users-roles-sharded-29269 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.CKNZufquiZ ++ mktemp + local LAST_ERR=/tmp/tmp.gMjHP6Krqg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CKNZufquiZ customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.gMjHP6Krqg + rm /tmp/tmp.CKNZufquiZ /tmp/tmp.gMjHP6Krqg + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.FPUDylLatZ ++ mktemp + local LAST_ERR=/tmp/tmp.kvFdHKTtA2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FPUDylLatZ + cat /tmp/tmp.kvFdHKTtA2 + rm /tmp/tmp.FPUDylLatZ /tmp/tmp.kvFdHKTtA2 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch 
perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0KFaaYnMf5 ++ mktemp + local LAST_ERR=/tmp/tmp.mkHTYwEwns + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0KFaaYnMf5 + cat /tmp/tmp.mkHTYwEwns + rm /tmp/tmp.0KFaaYnMf5 /tmp/tmp.mkHTYwEwns + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.yt2VniwTi5 ++ mktemp + local LAST_ERR=/tmp/tmp.OOfHcgAvTL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yt2VniwTi5 + cat /tmp/tmp.OOfHcgAvTL + rm /tmp/tmp.yt2VniwTi5 /tmp/tmp.OOfHcgAvTL + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.cn5H9enRIJ ++ mktemp + local LAST_ERR=/tmp/tmp.tSkT1oDy01 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cn5H9enRIJ clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.tSkT1oDy01 + rm /tmp/tmp.cn5H9enRIJ /tmp/tmp.tSkT1oDy01 + return 0 + check_crd_for_deletion PR-1917-02b4bc7d + local git_tag=PR-1917-02b4bc7d ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1917-02b4bc7d/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RLoXTzJR9z +++ mktemp ++ local LAST_ERR=/tmp/tmp.EjB8D7vT0S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.RLoXTzJR9z ++ cat /tmp/tmp.EjB8D7vT0S Error from server (NotFound): 
customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.RLoXTzJR9z ++ cat /tmp/tmp.EjB8D7vT0S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.RLoXTzJR9z ++ cat /tmp/tmp.EjB8D7vT0S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.RLoXTzJR9z ++ cat /tmp/tmp.EjB8D7vT0S Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.RLoXTzJR9z /tmp/tmp.EjB8D7vT0S ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + xargs kubectl delete ns + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local 
LAST_OUT=/tmp/tmp.qwi6ZnJFPn ++ mktemp + local LAST_OUT=/tmp/tmp.vUWME3A4O9 ++ mktemp + local LAST_ERR=/tmp/tmp.WtQ6RhZxwo + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.4U0nSPSGwp + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qwi6ZnJFPn + cat /tmp/tmp.WtQ6RhZxwo + rm /tmp/tmp.qwi6ZnJFPn /tmp/tmp.WtQ6RhZxwo + return 0 namespace "custom-users-roles-sharded-1338" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vUWME3A4O9 namespace "psmdb-operator" deleted + cat /tmp/tmp.4U0nSPSGwp + rm /tmp/tmp.vUWME3A4O9 /tmp/tmp.4U0nSPSGwp + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.QmYELsze8p ++ mktemp + local LAST_ERR=/tmp/tmp.FoCOLhkeaW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QmYELsze8p + cat /tmp/tmp.FoCOLhkeaW + rm /tmp/tmp.QmYELsze8p /tmp/tmp.FoCOLhkeaW + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.o3LB4JB340 ++ mktemp + local LAST_ERR=/tmp/tmp.I8e1sVYito + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o3LB4JB340 namespace/psmdb-operator created + cat /tmp/tmp.I8e1sVYito + rm /tmp/tmp.o3LB4JB340 /tmp/tmp.I8e1sVYito + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.a2BfRopYl0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1T61seErc7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a2BfRopYl0 ++ cat /tmp/tmp.1T61seErc7 ++ rm /tmp/tmp.a2BfRopYl0 /tmp/tmp.1T61seErc7 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster7 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Xor1xNXx2C ++ mktemp + local LAST_ERR=/tmp/tmp.pM7D5n1sNl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster7 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xor1xNXx2C Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster7" modified. 
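-----------------------------------------------------------------------------------
note: the finalizer-stripping pattern in delete_crd (annotation, not log output)
-----------------------------------------------------------------------------------
The cleanup phase traced above hinges on one pattern: before deleting each CRD, the harness clears finalizers on any leftover custom resources so CRD deletion cannot hang on a stuck finalizer. Below is a minimal sketch of that pattern as reconstructed from this trace; the real helper lives in the repo's e2e-tests scripts and may differ in detail. Writing the separator filter as '^---$' instead of the trace's '\-\-\-' also avoids the "stray \ before -" grep warnings seen at the top of the log.

    # For each CRD named in deploy/crd.yaml: strip finalizers from all
    # remaining CRs, then wait until the CRD itself is gone.
    for crd_name in $(yq eval '.metadata.name' deploy/crd.yaml | grep -v '^---$'); do
        kubectl get "$crd_name" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || :    # tolerated: "doesn't have a resource type" means the CRD is already gone
        kubectl wait --for=delete crd "$crd_name"
    done

In the trace, xargs still ran once on empty input, which is where the odd 'kubectl patch ... -n sh' attempt comes from ($0 defaults to "sh" when sh -c gets no arguments); GNU xargs's -r flag would skip that empty run.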
+ cat /tmp/tmp.pM7D5n1sNl + rm /tmp/tmp.Xor1xNXx2C /tmp/tmp.pM7D5n1sNl + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Q1HYuyXpPs ++ mktemp + local LAST_ERR=/tmp/tmp.73IjtVf7EK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q1HYuyXpPs customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.73IjtVf7EK + rm /tmp/tmp.Q1HYuyXpPs /tmp/tmp.73IjtVf7EK + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.5k1zwHceHT ++ mktemp + local LAST_ERR=/tmp/tmp.WsiN65HmlP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5k1zwHceHT clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.WsiN65HmlP + rm /tmp/tmp.5k1zwHceHT /tmp/tmp.WsiN65HmlP + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.mwiVjjuRN5 ++ mktemp + local LAST_ERR=/tmp/tmp.i9umyvsgiQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mwiVjjuRN5 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.i9umyvsgiQ + rm /tmp/tmp.mwiVjjuRN5 /tmp/tmp.i9umyvsgiQ + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.gKeqtRRM4z +++ mktemp ++ local LAST_ERR=/tmp/tmp.YynbAbTVy9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gKeqtRRM4z ++ cat /tmp/tmp.YynbAbTVy9 ++ rm /tmp/tmp.gKeqtRRM4z /tmp/tmp.YynbAbTVy9 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-749bfc94f5-zt9xs + local pod=percona-server-mongodb-operator-749bfc94f5-zt9xs + set +o xtrace waiting for pod/percona-server-mongodb-operator-749bfc94f5-zt9xs to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.8sccssCRzx +++ mktemp ++ local LAST_ERR=/tmp/tmp.6gnpMsZuk8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8sccssCRzx ++ cat /tmp/tmp.6gnpMsZuk8 ++ rm /tmp/tmp.8sccssCRzx /tmp/tmp.6gnpMsZuk8 ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-749bfc94f5-zt9xs ++ mktemp + local LAST_OUT=/tmp/tmp.5O0CenJZlt ++ mktemp + local LAST_ERR=/tmp/tmp.QdbxdSfahl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-749bfc94f5-zt9xs + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5O0CenJZlt + cat /tmp/tmp.QdbxdSfahl + rm /tmp/tmp.5O0CenJZlt /tmp/tmp.QdbxdSfahl + return 0 2025-12-18T10:04:16.215Z INFO setup Manager starting up {"gitCommit": "02b4bc7dc98d6ade6d4d249d26a8ac1fdd611e8b", "gitBranch": "PR-1917-02b4bc7d", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace custom-users-roles-sharded-29269 + local namespace=custom-users-roles-sharded-29269 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ 
grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces custom-users-roles-sharded-29269' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces custom-users-roles-sharded-29269 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace custom-users-roles-sharded-29269 --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.e9a0XTsTbA + local LAST_OUT=/tmp/tmp.441pNQG3D4 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.tyIAX8tbgF + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.4x5HgZpQ32 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace custom-users-roles-sharded-29269 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e9a0XTsTbA + cat /tmp/tmp.tyIAX8tbgF + rm /tmp/tmp.e9a0XTsTbA /tmp/tmp.tyIAX8tbgF + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.441pNQG3D4 + cat /tmp/tmp.4x5HgZpQ32 + rm /tmp/tmp.441pNQG3D4 /tmp/tmp.4x5HgZpQ32 + return 0 + kubectl_bin wait --for=delete namespace custom-users-roles-sharded-29269 ++ mktemp + local LAST_OUT=/tmp/tmp.zgyMS9ZKPH ++ mktemp + local LAST_ERR=/tmp/tmp.W6OGVftety + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace custom-users-roles-sharded-29269 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zgyMS9ZKPH + cat /tmp/tmp.W6OGVftety + rm /tmp/tmp.zgyMS9ZKPH /tmp/tmp.W6OGVftety + return 0 + desc 'create namespace custom-users-roles-sharded-29269' + set +o xtrace ----------------------------------------------------------------------------------- create namespace 
custom-users-roles-sharded-29269 ----------------------------------------------------------------------------------- + kubectl_bin create namespace custom-users-roles-sharded-29269 ++ mktemp + local LAST_OUT=/tmp/tmp.dnGMMg8fdW ++ mktemp + local LAST_ERR=/tmp/tmp.VRAeLFwikr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace custom-users-roles-sharded-29269 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dnGMMg8fdW namespace/custom-users-roles-sharded-29269 created + cat /tmp/tmp.VRAeLFwikr + rm /tmp/tmp.dnGMMg8fdW /tmp/tmp.VRAeLFwikr + return 0 + set_kube_ctx custom-users-roles-sharded-29269 + local namespace=custom-users-roles-sharded-29269 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WyagFX8Sl0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9jeSjdjTW8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WyagFX8Sl0 ++ cat /tmp/tmp.9jeSjdjTW8 ++ rm /tmp/tmp.WyagFX8Sl0 /tmp/tmp.9jeSjdjTW8 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster7 --namespace=custom-users-roles-sharded-29269 ++ mktemp + local LAST_OUT=/tmp/tmp.DFSOxNebWl ++ mktemp + local LAST_ERR=/tmp/tmp.dPnTUXhR5w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster7 --namespace=custom-users-roles-sharded-29269 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DFSOxNebWl Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-02b4bc7d-42-cluster7" modified. 
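-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper (annotation, not log output)
-----------------------------------------------------------------------------------
Every kubectl invocation in this log goes through the harness's kubectl_bin wrapper: stdout and stderr are captured to mktemp files, the command is retried up to three times with growing sleeps, and the captured output is replayed afterwards. That is why each call is surrounded by LAST_OUT/LAST_ERR bookkeeping, and why the bookkeeping of two concurrent wrapped calls interleaves in the trace (as in the paired 'get ns' / 'delete namespace' runs above). A minimal sketch reconstructed from the trace; the real function lives in the e2e-tests helpers and may differ:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            sleep $((timeout * i))    # 0s, 4s, 8s back-off, matching the sleeps in the trace
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

The 'kubectl get crd/null' sequence earlier in the log shows all three attempts and the final 'return 1' of exactly this shape.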
+ cat /tmp/tmp.dPnTUXhR5w + rm /tmp/tmp.DFSOxNebWl /tmp/tmp.dPnTUXhR5w + return 0 + mongosUri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/conf/app-user-secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.AB3o3YgHTH ++ mktemp + local LAST_ERR=/tmp/tmp.Rb52YeczJE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/conf/app-user-secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AB3o3YgHTH deployment.apps/psmdb-client created secret/some-users created secret/user-one created secret/user-two created + cat /tmp/tmp.Rb52YeczJE + rm /tmp/tmp.AB3o3YgHTH /tmp/tmp.Rb52YeczJE + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.XgqVbjoB4c ++ mktemp + local LAST_ERR=/tmp/tmp.JK6ZKjBIVs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XgqVbjoB4c secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.JK6ZKjBIVs + rm /tmp/tmp.XgqVbjoB4c /tmp/tmp.JK6ZKjBIVs + return 0 + version_gt 1.19 ++ echo '1.31 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/container-rc.yaml + /usr/sbin/sed s/docker/runc/g + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.kPxuX5ErwN ++ mktemp + local LAST_ERR=/tmp/tmp.0kDJLcobFF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kPxuX5ErwN runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.0kDJLcobFF + rm /tmp/tmp.kPxuX5ErwN /tmp/tmp.0kDJLcobFF + return 0 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster 
----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1917-02b4bc7d"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.DoT5CFUYTa ++ mktemp + local LAST_ERR=/tmp/tmp.DlSKImJJxi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DoT5CFUYTa perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.DlSKImJJxi + rm /tmp/tmp.DoT5CFUYTa /tmp/tmp.DlSKImJJxi + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0xqcOt3DY3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7ZSfjgCCen ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0xqcOt3DY3 ++ cat /tmp/tmp.7ZSfjgCCen ++ rm /tmp/tmp.0xqcOt3DY3 /tmp/tmp.7ZSfjgCCen ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dj662U1rHF +++ mktemp ++ local LAST_ERR=/tmp/tmp.wvMqPLsSFg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dj662U1rHF ++ cat /tmp/tmp.wvMqPLsSFg ++ rm /tmp/tmp.dj662U1rHF /tmp/tmp.wvMqPLsSFg ++ return 0 + [[ '' == 
\t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ayE5rBmKhx +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vx7LMLEJeu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ayE5rBmKhx ++ cat /tmp/tmp.Vx7LMLEJeu ++ rm /tmp/tmp.ayE5rBmKhx /tmp/tmp.Vx7LMLEJeu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ksbQk6OADn +++ mktemp ++ local LAST_ERR=/tmp/tmp.BqVj2jSXsz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ksbQk6OADn ++ cat /tmp/tmp.BqVj2jSXsz ++ rm /tmp/tmp.ksbQk6OADn /tmp/tmp.BqVj2jSXsz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2H4HJPLbzw +++ mktemp ++ local LAST_ERR=/tmp/tmp.vkjXQQAeiR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2H4HJPLbzw ++ cat /tmp/tmp.vkjXQQAeiR ++ rm /tmp/tmp.2H4HJPLbzw /tmp/tmp.vkjXQQAeiR ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EFko3jNRvj +++ mktemp ++ local LAST_ERR=/tmp/tmp.I1yny9tAYH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EFko3jNRvj ++ cat /tmp/tmp.I1yny9tAYH ++ rm /tmp/tmp.EFko3jNRvj /tmp/tmp.I1yny9tAYH ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for 
pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l4zw9Vn65z +++ mktemp ++ local LAST_ERR=/tmp/tmp.c7pxWDaWnx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l4zw9Vn65z ++ cat /tmp/tmp.c7pxWDaWnx ++ rm /tmp/tmp.l4zw9Vn65z /tmp/tmp.c7pxWDaWnx ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7kvzR3uWUL +++ mktemp ++ local LAST_ERR=/tmp/tmp.N32w27l7KY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7kvzR3uWUL ++ cat /tmp/tmp.N32w27l7KY ++ rm /tmp/tmp.7kvzR3uWUL /tmp/tmp.N32w27l7KY ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l4SFw6uYhM +++ mktemp ++ local LAST_ERR=/tmp/tmp.i3Ydha19Et ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l4SFw6uYhM ++ cat /tmp/tmp.i3Ydha19Et ++ rm /tmp/tmp.l4SFw6uYhM /tmp/tmp.i3Ydha19Et ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2JyH8hCnI5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fN63jWZnEI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2JyH8hCnI5 ++ cat /tmp/tmp.fN63jWZnEI ++ rm /tmp/tmp.2JyH8hCnI5 /tmp/tmp.fN63jWZnEI ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0.yml + local 
new_result=/tmp/tmp.cR5mi0auxS/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("custom-users-roles-sharded-29269", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.CpmYgr9OJN ++ mktemp + local LAST_ERR=/tmp/tmp.HI3a2bxR7O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CpmYgr9OJN + cat /tmp/tmp.HI3a2bxR7O + rm /tmp/tmp.CpmYgr9OJN /tmp/tmp.HI3a2bxR7O + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.cR5mi0auxS/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-18T10:08:07+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.cR5mi0auxS/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("custom-users-roles-sharded-29269", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.RCy7jyrouD ++ mktemp + local LAST_ERR=/tmp/tmp.dDXvDFn2JW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RCy7jyrouD + cat /tmp/tmp.dDXvDFn2JW + rm /tmp/tmp.RCy7jyrouD /tmp/tmp.dDXvDFn2JW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.cR5mi0auxS/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-18T10:08:08+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.cR5mi0auxS/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("custom-users-roles-sharded-29269", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.kyMC0cmgNA ++ mktemp + local LAST_ERR=/tmp/tmp.cOAuBxybQA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kyMC0cmgNA + cat /tmp/tmp.cOAuBxybQA + rm /tmp/tmp.kyMC0cmgNA /tmp/tmp.cOAuBxybQA + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.cR5mi0auxS/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.cR5mi0auxS/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-12-18T10:08:10+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'check user created on cluster creation' + set +o xtrace ----------------------------------------------------------------------------------- check user created on cluster creation ----------------------------------------------------------------------------------- + userOne=user-one ++ getSecretData user-one userOnePassKey ++ local secretName=user-one ++ local dataKey=userOnePassKey +++ kubectl get secrets/user-one '--template={{.data.userOnePassKey}}' +++ base64 -d ++ local data=clusterMonitor ++ echo clusterMonitor + userOnePass=clusterMonitor ++ get_user_cmd '"user-one"' ++ local 'user="user-one"' ++ cmd='(function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) 
return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-one + local database=admin + local 'command=(function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-one + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use admin\n (function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NuTGfKxuGQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.qWHKvzzcqq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NuTGfKxuGQ ++ cat /tmp/tmp.qWHKvzzcqq ++ rm /tmp/tmp.NuTGfKxuGQ /tmp/tmp.qWHKvzzcqq ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.SA8cckwnHp ++ mktemp + local LAST_ERR=/tmp/tmp.GZxkBNkVpm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-one"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = 
roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SA8cckwnHp + cat /tmp/tmp.GZxkBNkVpm + rm /tmp/tmp.SA8cckwnHp /tmp/tmp.GZxkBNkVpm + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-one.json /tmp/tmp.cR5mi0auxS/user-one + check_auth user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ms3iPSIqpv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FJCgqORJiA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ms3iPSIqpv +++ cat /tmp/tmp.FJCgqORJiA +++ rm /tmp/tmp.ms3iPSIqpv /tmp/tmp.FJCgqORJiA +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TURRPJMa00 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z3DUss3Sca ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TURRPJMa00 ++ cat /tmp/tmp.Z3DUss3Sca ++ rm /tmp/tmp.TURRPJMa00 /tmp/tmp.Z3DUss3Sca ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + generatedUserSecret=some-name-custom-user-secret ++ base64 -d ++ kubectl_bin get secret some-name-custom-user-secret -o 'jsonpath={.data.user-gen}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2aTbTjN3mU +++ mktemp ++ local LAST_ERR=/tmp/tmp.YBriqN9rQB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secret some-name-custom-user-secret -o 'jsonpath={.data.user-gen}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2aTbTjN3mU ++ cat /tmp/tmp.YBriqN9rQB ++ rm /tmp/tmp.2aTbTjN3mU /tmp/tmp.YBriqN9rQB ++ return 0 + generatedPass=rszHJJgEDVhqPnmjg ++ get_user_cmd '"user-gen"' ++ local 'user="user-gen"' ++ cmd='(function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-gen + local database=admin + local 'command=(function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-gen + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use admin\n (function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K7l3bcxBg8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.snvjB8wBOT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K7l3bcxBg8 ++ cat /tmp/tmp.snvjB8wBOT ++ rm /tmp/tmp.K7l3bcxBg8 /tmp/tmp.snvjB8wBOT ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > 
b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.yD2XnoVtUq ++ mktemp + local LAST_ERR=/tmp/tmp.bh0M4Jry6s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-gen"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yD2XnoVtUq + cat /tmp/tmp.bh0M4Jry6s + rm /tmp/tmp.yD2XnoVtUq /tmp/tmp.bh0M4Jry6s + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-gen.json /tmp/tmp.cR5mi0auxS/user-gen + check_auth user-gen:rszHJJgEDVhqPnmjg@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-gen:rszHJJgEDVhqPnmjg@some-name-mongos.custom-users-roles-sharded-29269 ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-gen:rszHJJgEDVhqPnmjg@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-gen:rszHJJgEDVhqPnmjg@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp egrep: warning: egrep is obsolescent; using grep -E +++ local LAST_OUT=/tmp/tmp.w4cqMnHcE9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1Ktq5nYqvT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.w4cqMnHcE9 +++ cat /tmp/tmp.1Ktq5nYqvT +++ rm /tmp/tmp.w4cqMnHcE9 /tmp/tmp.1Ktq5nYqvT +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-gen:rszHJJgEDVhqPnmjg@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QZu8A1jbGL +++ mktemp ++ local LAST_ERR=/tmp/tmp.U7vxveZ9OJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-gen:rszHJJgEDVhqPnmjg@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QZu8A1jbGL ++ cat /tmp/tmp.U7vxveZ9OJ ++ rm /tmp/tmp.QZu8A1jbGL /tmp/tmp.U7vxveZ9OJ ++ return 0 + ping=1 + 
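user-gen's password is operator-generated: the trace reads it back from the some-name-custom-user-secret Secret (key user-gen, base64-encoded) and proves the credentials work with a ping, which returns 1 on success. Roughly:

# password generated by the operator for the user-gen entry
PASS=$(kubectl get secret some-name-custom-user-secret \
  -o 'jsonpath={.data.user-gen}' | base64 -d)
ok=$(kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c \
  "printf 'db.runCommand({ ping: 1 }).ok\n' | mongo mongodb://user-gen:${PASS}@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet")
[ "$ok" = 1 ] && echo "ping return ok"   # expect 1, as asserted by the test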
desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' ++ get_user_cmd '"user-external"' ++ local 'user="user-external"' ++ cmd='(function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare '$external' '(function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-external + local 'database=$external' + local 'command=(function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-external + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use $external\n (function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use $external\n (function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + local mongo_bin=mongo + sed '/"userId"/d' ++ awk -F: '{print $2}' ++ echo .svc.cluster.local egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UYw9TgpMag +++ mktemp ++ local LAST_ERR=/tmp/tmp.78uP5Csszx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UYw9TgpMag ++ cat /tmp/tmp.78uP5Csszx ++ rm /tmp/tmp.UYw9TgpMag /tmp/tmp.78uP5Csszx ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use $external\n (function() { var user = 
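The check running here targets the $external database, which MongoDB reserves for externally authenticated principals (x.509, LDAP), so user-external has no password secret to exercise and the test only compares the stored roles. Note the dollar sign must reach the mongo shell unexpanded, for example:

kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c \
  'printf "use \$external\n printjson(db.getUser(\"user-external\"))\n" | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin'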
db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.pXoPjAu3RY ++ mktemp + local LAST_ERR=/tmp/tmp.I5OerFUGxT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use $external\n (function() { var user = db.getUser("user-external"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pXoPjAu3RY + cat /tmp/tmp.I5OerFUGxT + rm /tmp/tmp.pXoPjAu3RY /tmp/tmp.I5OerFUGxT + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-external.json /tmp/tmp.cR5mi0auxS/user-external + desc 'delete initial user from CR and create a new one' + set +o xtrace ----------------------------------------------------------------------------------- delete initial user from CR and create a new one ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.5psfEmthkt ++ mktemp + local LAST_ERR=/tmp/tmp.iIABmBkDUj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5psfEmthkt perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.iIABmBkDUj + rm /tmp/tmp.5psfEmthkt /tmp/tmp.iIABmBkDUj + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zL4fY3K3vw +++ mktemp ++ local LAST_ERR=/tmp/tmp.yhuzxMUFM8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 
1 ']' ++ break ++ cat /tmp/tmp.zL4fY3K3vw ++ cat /tmp/tmp.yhuzxMUFM8 ++ rm /tmp/tmp.zL4fY3K3vw /tmp/tmp.yhuzxMUFM8 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2QS9JUU93n +++ mktemp ++ local LAST_ERR=/tmp/tmp.TNEtWt7uh8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2QS9JUU93n ++ cat /tmp/tmp.TNEtWt7uh8 ++ rm /tmp/tmp.2QS9JUU93n /tmp/tmp.TNEtWt7uh8 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.POWkTiQBDN +++ mktemp ++ local LAST_ERR=/tmp/tmp.shpZG8SLdc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.POWkTiQBDN ++ cat /tmp/tmp.shpZG8SLdc ++ rm /tmp/tmp.POWkTiQBDN /tmp/tmp.shpZG8SLdc ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_user_cmd '"user-two"' ++ local 'user="user-two"' ++ cmd='(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-two + local database=admin + local 'command=(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-two + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); 
})();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + local mongo_bin=mongo + sed '/"userId"/d' ++ echo .svc.cluster.local egrep: warning: egrep is obsolescent; using grep -E ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fk9DrsVcxD +++ mktemp ++ local LAST_ERR=/tmp/tmp.LItOFg8Lhr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fk9DrsVcxD ++ cat /tmp/tmp.LItOFg8Lhr ++ rm /tmp/tmp.Fk9DrsVcxD /tmp/tmp.LItOFg8Lhr ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.nmGAmWgpC3 ++ mktemp + local LAST_ERR=/tmp/tmp.mEb0RvNIEr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nmGAmWgpC3 + cat /tmp/tmp.mEb0RvNIEr + rm /tmp/tmp.nmGAmWgpC3 /tmp/tmp.mEb0RvNIEr + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-two.json /tmp/tmp.cR5mi0auxS/user-two + userTwo=user-two ++ getSecretData user-two userTwoPassKey ++ local secretName=user-two ++ local dataKey=userTwoPassKey +++ kubectl get secrets/user-two '--template={{.data.userTwoPassKey}}' +++ base64 -d ++ local data=clusterMonitor ++ echo clusterMonitor + userTwoPass=clusterMonitor + check_auth user-two:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-two:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-two:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local port=27017 ++ local mongo_bin=mongo +++ echo .svc.cluster.local +++ awk -F: '{print $2}' egrep: warning: 
egrep is obsolescent; using grep -E ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gFzr7hd4VE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Bfuxd7SsQi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gFzr7hd4VE +++ cat /tmp/tmp.Bfuxd7SsQi +++ rm /tmp/tmp.gFzr7hd4VE /tmp/tmp.Bfuxd7SsQi +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3D5uEcmmcH +++ mktemp ++ local LAST_ERR=/tmp/tmp.CaiOs6KRJk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3D5uEcmmcH ++ cat /tmp/tmp.CaiOs6KRJk ++ rm /tmp/tmp.3D5uEcmmcH /tmp/tmp.CaiOs6KRJk ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_auth user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local mongo_bin=mongo +++ echo .svc.cluster.local +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RImpSsK6KY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BrL2sPBTq2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RImpSsK6KY +++ cat /tmp/tmp.BrL2sPBTq2 +++ rm /tmp/tmp.RImpSsK6KY /tmp/tmp.BrL2sPBTq2 +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo 
mongodb://user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r5XisNsBOK +++ mktemp ++ local LAST_ERR=/tmp/tmp.9nhFTTeFgn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-one:clusterMonitor@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r5XisNsBOK ++ cat /tmp/tmp.9nhFTTeFgn ++ rm /tmp/tmp.r5XisNsBOK /tmp/tmp.9nhFTTeFgn ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check password change' + set +o xtrace ----------------------------------------------------------------------------------- check password change ----------------------------------------------------------------------------------- + userTwoNewPass=new-user-two-password ++ echo -n new-user-two-password ++ base64 + patch_secret user-two userTwoPassKey bmV3LXVzZXItdHdvLXBhc3N3b3Jk + local secret=user-two + local key=userTwoPassKey + local value=bmV3LXVzZXItdHdvLXBhc3N3b3Jk + kubectl patch secret user-two '-p={"data":{"userTwoPassKey": "bmV3LXVzZXItdHdvLXBhc3N3b3Jk"}}' secret/user-two patched + sleep 20 + check_auth user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo +++ echo .svc.cluster.local +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OSNQjyJPZi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GNQrzTGUu9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OSNQjyJPZi +++ cat /tmp/tmp.GNQrzTGUu9 +++ rm /tmp/tmp.OSNQjyJPZi /tmp/tmp.GNQrzTGUu9 +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hq3F1KQ6yd +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xow1nVAsEL ++ local exit_status=0 ++ local 
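The password change above never talks to MongoDB directly: the test patches the user's Secret with the base64 of the new value, then gives the operator time to propagate it before re-authenticating. Equivalently:

newpass=new-user-two-password
kubectl patch secret user-two \
  -p="{\"data\":{\"userTwoPassKey\": \"$(echo -n "$newpass" | base64)\"}}"
sleep 20   # allow the operator a reconcile cycle before checking auth with the new password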
timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hq3F1KQ6yd ++ cat /tmp/tmp.Xow1nVAsEL ++ rm /tmp/tmp.hq3F1KQ6yd /tmp/tmp.Xow1nVAsEL ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check user roles update from CR' + set +o xtrace ----------------------------------------------------------------------------------- check user roles update from CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.2F7UgIZ6WG ++ mktemp + local LAST_ERR=/tmp/tmp.298iyk0VOF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-two", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2F7UgIZ6WG perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.298iyk0VOF + rm /tmp/tmp.2F7UgIZ6WG /tmp/tmp.298iyk0VOF + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EVOVxEVHfE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mg1vwLx3UR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EVOVxEVHfE ++ cat /tmp/tmp.Mg1vwLx3UR ++ rm /tmp/tmp.EVOVxEVHfE /tmp/tmp.Mg1vwLx3UR ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tJ94CVItIw +++ mktemp ++ local LAST_ERR=/tmp/tmp.RZXa142rM0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ 
set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tJ94CVItIw ++ cat /tmp/tmp.RZXa142rM0 ++ rm /tmp/tmp.tJ94CVItIw /tmp/tmp.RZXa142rM0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HPkJrNaK6h +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vc6hCwZMGV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HPkJrNaK6h ++ cat /tmp/tmp.Vc6hCwZMGV ++ rm /tmp/tmp.HPkJrNaK6h /tmp/tmp.Vc6hCwZMGV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_user_cmd '"user-two"' ++ local 'user="user-two"' ++ cmd='(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-two-update-roles + local database=admin + local 'command=(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-two-update-roles + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AcGTGWifSP +++ mktemp ++ local 
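After the CR narrows user-two down to clusterAdmin, the golden file switches to user-two-update-roles.json. A quick manual spot-check of just the roles, under the same assumptions as the sketches above:

kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c \
  'printf "use admin\n printjson(db.getUser(\"user-two\").roles)\n" | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet'
# once reconciled, the only role left should be { "role" : "clusterAdmin", "db" : "admin" }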
LAST_ERR=/tmp/tmp.4PllpMBiT2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AcGTGWifSP ++ cat /tmp/tmp.4PllpMBiT2 ++ rm /tmp/tmp.AcGTGWifSP /tmp/tmp.4PllpMBiT2 ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.AQw7N97TmJ ++ mktemp + local LAST_ERR=/tmp/tmp.ODaTRyukGJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AQw7N97TmJ + cat /tmp/tmp.ODaTRyukGJ + rm /tmp/tmp.AQw7N97TmJ /tmp/tmp.ODaTRyukGJ + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json /tmp/tmp.cR5mi0auxS/user-two-update-roles + desc 'check user roles update from DB' + set +o xtrace ----------------------------------------------------------------------------------- check user roles update from DB ----------------------------------------------------------------------------------- + run_mongos 'use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local 'command=use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A9psdXftLo +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fy51ScZLUT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A9psdXftLo ++ cat /tmp/tmp.Fy51ScZLUT ++ rm /tmp/tmp.A9psdXftLo /tmp/tmp.Fy51ScZLUT ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.OXslLHFWIw ++ mktemp + local LAST_ERR=/tmp/tmp.mL3UrDNsBc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OXslLHFWIw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("defde838-3d41-476b-b703-7c73973e2d5f") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db admin bye + cat /tmp/tmp.mL3UrDNsBc + rm /tmp/tmp.OXslLHFWIw /tmp/tmp.mL3UrDNsBc + return 0 + sleep 15 ++ get_user_cmd '"user-two"' ++ local 'user="user-two"' ++ cmd='(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-two-update-roles + local database=admin + local 'command=(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-two-update-roles + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongos 'use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + sed '/"userId"/d' + local 'command=use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local 
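Here the roles are changed behind the operator's back with db.updateUser, and after the 15-second sleep the very same golden file (user-two-update-roles.json) is compared again: the operator is expected to revert the drift to what the CR declares. The drift itself, as run from the psmdb-client pod:

printf 'use admin\n db.updateUser("user-two", { roles: [{ role: "userAdminAnyDatabase", db: "admin" }] })\n' \
  | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin
sleep 15   # reconcile window in which the operator restores the CR-declared roles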
mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Y3h1GFr3B +++ mktemp ++ local LAST_ERR=/tmp/tmp.2gKHGkoUs2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Y3h1GFr3B ++ cat /tmp/tmp.2gKHGkoUs2 ++ rm /tmp/tmp.0Y3h1GFr3B /tmp/tmp.2gKHGkoUs2 ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.1ufZAUlMS0 ++ mktemp + local LAST_ERR=/tmp/tmp.lDTKlIAcEM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1ufZAUlMS0 + cat /tmp/tmp.lDTKlIAcEM + rm /tmp/tmp.1ufZAUlMS0 /tmp/tmp.lDTKlIAcEM + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json /tmp/tmp.cR5mi0auxS/user-two-update-roles + desc 'check user recreated after deleted from DB' + set +o xtrace ----------------------------------------------------------------------------------- check user recreated after deleted from DB ----------------------------------------------------------------------------------- + run_mongos 'use admin\n db.dropUser("user-two")' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local 'command=use admin\n db.dropUser("user-two")' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JjR07SfJaf +++ mktemp ++ local LAST_ERR=/tmp/tmp.YQIRazQGrs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JjR07SfJaf ++ cat /tmp/tmp.YQIRazQGrs ++ rm /tmp/tmp.JjR07SfJaf /tmp/tmp.YQIRazQGrs ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec 
psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.dropUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.N1OMfGFOXb ++ mktemp + local LAST_ERR=/tmp/tmp.Ogx6c2Tnec + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.dropUser("user-two")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N1OMfGFOXb Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("4e786682-6d6e-4351-9bf1-ce46b6930328") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db admin true bye + cat /tmp/tmp.Ogx6c2Tnec + rm /tmp/tmp.N1OMfGFOXb /tmp/tmp.Ogx6c2Tnec + return 0 + sleep 15 ++ get_user_cmd '"user-two"' ++ local 'user="user-two"' ++ cmd='(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-two-update-roles + local database=admin + local 'command=(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-two-update-roles + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 
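Deletion is handled the same way: the user is dropped directly in MongoDB (the shell answers true above), and after another reconcile window the test expects db.getUser("user-two") to match the golden file again, i.e. the operator recreates any user that is declared in spec.users but missing from the database. From the psmdb-client pod:

printf 'use admin\n db.dropUser("user-two")\n' \
  | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin
sleep 15   # the operator re-creates the user from spec.users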
's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UNohCosm9K +++ mktemp ++ local LAST_ERR=/tmp/tmp.AivhqREJfv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UNohCosm9K ++ cat /tmp/tmp.AivhqREJfv ++ rm /tmp/tmp.UNohCosm9K /tmp/tmp.AivhqREJfv ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.2IHFEOFTvw ++ mktemp + local LAST_ERR=/tmp/tmp.VZDAHbLdER + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2IHFEOFTvw + cat /tmp/tmp.VZDAHbLdER + rm /tmp/tmp.2IHFEOFTvw /tmp/tmp.VZDAHbLdER + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json /tmp/tmp.cR5mi0auxS/user-two-update-roles + desc 'check new user created after updated user name via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new user created after updated user name via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.UxuPA0kpOe ++ mktemp + local LAST_ERR=/tmp/tmp.hpdgTaeoPG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"admin", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UxuPA0kpOe perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.hpdgTaeoPG + rm /tmp/tmp.UxuPA0kpOe /tmp/tmp.hpdgTaeoPG + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o 
xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mxeQgjVDRW +++ mktemp ++ local LAST_ERR=/tmp/tmp.bkQ8ZU8vVt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mxeQgjVDRW ++ cat /tmp/tmp.bkQ8ZU8vVt ++ rm /tmp/tmp.mxeQgjVDRW /tmp/tmp.bkQ8ZU8vVt ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sWcpIsarEa +++ mktemp ++ local LAST_ERR=/tmp/tmp.JKnzFggnO2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sWcpIsarEa ++ cat /tmp/tmp.JKnzFggnO2 ++ rm /tmp/tmp.sWcpIsarEa /tmp/tmp.JKnzFggnO2 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3l4ShFXi2g +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lo8RerG87q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3l4ShFXi2g ++ cat /tmp/tmp.Lo8RerG87q ++ rm /tmp/tmp.3l4ShFXi2g /tmp/tmp.Lo8RerG87q ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_user_cmd '"user-three"' ++ local 'user="user-three"' ++ cmd='(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-three-admin-db + local database=admin + local 'command=(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local 
uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-three-admin-db + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6vUKpQhdu3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BOxE8umn85 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6vUKpQhdu3 ++ cat /tmp/tmp.BOxE8umn85 ++ rm /tmp/tmp.6vUKpQhdu3 /tmp/tmp.BOxE8umn85 ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.22n5sXBb1C ++ mktemp + local LAST_ERR=/tmp/tmp.MKq5aBawBT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.22n5sXBb1C + cat /tmp/tmp.MKq5aBawBT + rm /tmp/tmp.22n5sXBb1C /tmp/tmp.MKq5aBawBT + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-three-admin-db.json /tmp/tmp.cR5mi0auxS/user-three-admin-db ++ get_user_cmd '"user-two"' ++ local 'user="user-two"' ++ cmd='(function() { var user = db.getUser("user-two"); var roles = user.roles; 
roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-two-update-roles + local database=admin + local 'command=(function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-two-update-roles + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local egrep: warning: egrep is obsolescent; using grep -E ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXlOkS4BuD +++ mktemp ++ local LAST_ERR=/tmp/tmp.aNzJlJ15Un ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XXlOkS4BuD ++ cat /tmp/tmp.aNzJlJ15Un ++ rm /tmp/tmp.XXlOkS4BuD /tmp/tmp.aNzJlJ15Un ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.JFsIFSRGme ++ mktemp + local LAST_ERR=/tmp/tmp.WByB2kQ6HE + local exit_status=0 
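# Note: the get_user_cmd/compare pair traced above dumps a user through mongos with
# its roles array sorted before printjson, so the diff against the golden JSON file
# is order-independent. A minimal standalone sketch of the same probe follows; the
# pod name, credentials and mongos host are placeholders, not values from this run:
#
#   cmd='use admin\n (function(){ var u = db.getUser("user-two"); u.roles.sort((a, b) => a.role < b.role ? -1 : a.role > b.role ? 1 : 0); printjson(u); })();'
#   kubectl exec <psmdb-client-pod> -- bash -c "printf '${cmd}\n' | mongo mongodb://<user>:<pass>@<mongos-host>:27017/admin"
#
# The check_auth calls further down use the same transport with a one-liner,
# db.runCommand({ ping: 1 }).ok, run with --quiet, and assert the captured value
# equals 1 ('[' 1 '!=' 1 ']' in the trace), so a bad credential shows up as a
# failed comparison rather than a shell error.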
+ local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-two"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JFsIFSRGme + cat /tmp/tmp.WByB2kQ6HE + rm /tmp/tmp.JFsIFSRGme /tmp/tmp.WByB2kQ6HE + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json /tmp/tmp.cR5mi0auxS/user-two-update-roles + check_auth user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo +++ echo .svc.cluster.local +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aaoW7EelDs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CsSla6cUJD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aaoW7EelDs +++ cat /tmp/tmp.CsSla6cUJD +++ rm /tmp/tmp.aaoW7EelDs /tmp/tmp.CsSla6cUJD +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BHa7TK4D79 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hOtXsWz3k1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-two:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BHa7TK4D79 ++ cat /tmp/tmp.hOtXsWz3k1 ++ rm /tmp/tmp.BHa7TK4D79 /tmp/tmp.hOtXsWz3k1 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_auth 
user-three:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 + local uri=user-three:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 ++ run_mongos 'db.runCommand({ ping: 1 }).ok' user-three:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 '' '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=user-three:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local port=27017 ++ local mongo_bin=mongo ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ echo .svc.cluster.local +++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.v1ljYgzIfQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H3axk590Br +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.v1ljYgzIfQ +++ cat /tmp/tmp.H3axk590Br +++ rm /tmp/tmp.v1ljYgzIfQ /tmp/tmp.H3axk590Br +++ return 0 ++ local client_container=psmdb-client-696897d69b-zwwz8 ++ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-three:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iy7P306uOZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.d3FaqEl2Zs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://user-three:new-user-two-password@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iy7P306uOZ ++ cat /tmp/tmp.d3FaqEl2Zs ++ rm /tmp/tmp.iy7P306uOZ /tmp/tmp.d3FaqEl2Zs ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'check new user created after updated user db via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new user created after updated user db via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"newDb", "passwordSecretRef": { "name": "user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.3itkXuQHpg ++ mktemp + local LAST_ERR=/tmp/tmp.CXoC2tJNYP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-three", "db":"newDb", "passwordSecretRef": { "name": 
"user-two", "key": "userTwoPassKey" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3itkXuQHpg perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.CXoC2tJNYP + rm /tmp/tmp.3itkXuQHpg /tmp/tmp.CXoC2tJNYP + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dWsfTiVsPY +++ mktemp ++ local LAST_ERR=/tmp/tmp.eolceClyqb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dWsfTiVsPY ++ cat /tmp/tmp.eolceClyqb ++ rm /tmp/tmp.dWsfTiVsPY /tmp/tmp.eolceClyqb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.938ERs1GuJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7eilA46EHc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.938ERs1GuJ ++ cat /tmp/tmp.7eilA46EHc ++ rm /tmp/tmp.938ERs1GuJ /tmp/tmp.7eilA46EHc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2LCvgc93kE +++ mktemp ++ local LAST_ERR=/tmp/tmp.w41mZoCx2w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2LCvgc93kE ++ cat /tmp/tmp.w41mZoCx2w ++ rm /tmp/tmp.2LCvgc93kE /tmp/tmp.w41mZoCx2w ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_user_cmd '"user-three"' ++ local 'user="user-three"' ++ cmd='(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare newDb '(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; 
}); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-three-newDb-db + local database=newDb + local 'command=(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-three-newDb-db + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use newDb\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use newDb\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vFdVEzvJCa +++ mktemp ++ local LAST_ERR=/tmp/tmp.uGIb7ssKhM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vFdVEzvJCa ++ cat /tmp/tmp.uGIb7ssKhM ++ rm /tmp/tmp.vFdVEzvJCa /tmp/tmp.uGIb7ssKhM ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use newDb\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.FcBuLvDO3R ++ mktemp + local LAST_ERR=/tmp/tmp.hEHKyv0s2H + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use newDb\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
cat /tmp/tmp.FcBuLvDO3R + cat /tmp/tmp.hEHKyv0s2H + rm /tmp/tmp.FcBuLvDO3R /tmp/tmp.hEHKyv0s2H + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-three-newDb-db.json /tmp/tmp.cR5mi0auxS/user-three-newDb-db ++ get_user_cmd '"user-three"' ++ local 'user="user-three"' ++ cmd='(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-three-admin-db + local database=admin + local 'command=(function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-three-admin-db + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J50sI0FPdV +++ mktemp ++ local LAST_ERR=/tmp/tmp.0PPqaloPU1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J50sI0FPdV ++ cat /tmp/tmp.0PPqaloPU1 ++ rm /tmp/tmp.J50sI0FPdV /tmp/tmp.0PPqaloPU1 ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = 
db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.hg8iXZCFJV ++ mktemp + local LAST_ERR=/tmp/tmp.4DxErC4PZj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-three"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hg8iXZCFJV + cat /tmp/tmp.4DxErC4PZj + rm /tmp/tmp.hg8iXZCFJV /tmp/tmp.4DxErC4PZj + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-three-admin-db.json /tmp/tmp.cR5mi0auxS/user-three-admin-db + desc 'check new user created with default db and secret password key' + set +o xtrace ----------------------------------------------------------------------------------- check new user created with default db and secret password key ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-four", "passwordSecretRef": { "name": "user-two" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' ++ mktemp + local LAST_OUT=/tmp/tmp.aKQpwzc5RB ++ mktemp + local LAST_ERR=/tmp/tmp.gpiskuaVqq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"users":[ { "name":"user-four", "passwordSecretRef": { "name": "user-two" }, "roles": [ {"db":"admin","name":"clusterAdmin"} ] } ]} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aKQpwzc5RB perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.gpiskuaVqq + rm /tmp/tmp.aKQpwzc5RB /tmp/tmp.gpiskuaVqq + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pcnuWchf76 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NkZtuZDc8g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pcnuWchf76 ++ cat /tmp/tmp.NkZtuZDc8g ++ rm /tmp/tmp.pcnuWchf76 /tmp/tmp.NkZtuZDc8g ++ return 0 + [[ '' == 
\t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UI0OC27QfT +++ mktemp ++ local LAST_ERR=/tmp/tmp.OKqqYEoBT2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UI0OC27QfT ++ cat /tmp/tmp.OKqqYEoBT2 ++ rm /tmp/tmp.UI0OC27QfT /tmp/tmp.OKqqYEoBT2 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7TPmZPFzR8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nyS3mMSnaq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7TPmZPFzR8 ++ cat /tmp/tmp.nyS3mMSnaq ++ rm /tmp/tmp.7TPmZPFzR8 /tmp/tmp.nyS3mMSnaq ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_user_cmd '"user-four"' ++ local 'user="user-four"' ++ cmd='(function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare admin '(function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-four + local database=admin + local 'command=(function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-four + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use admin\n (function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local 
suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kL7dvWW0xq +++ mktemp ++ local LAST_ERR=/tmp/tmp.3wM6Emv4hs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kL7dvWW0xq ++ cat /tmp/tmp.3wM6Emv4hs ++ rm /tmp/tmp.kL7dvWW0xq /tmp/tmp.3wM6Emv4hs ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Kr4BOi4OnD ++ mktemp + local LAST_ERR=/tmp/tmp.PblxqPyfb5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var user = db.getUser("user-four"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kr4BOi4OnD + cat /tmp/tmp.PblxqPyfb5 + rm /tmp/tmp.Kr4BOi4OnD /tmp/tmp.PblxqPyfb5 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-four.json /tmp/tmp.cR5mi0auxS/user-four + desc 'check user role on cluster initialization' + set +o xtrace ----------------------------------------------------------------------------------- check user role on cluster initialization ----------------------------------------------------------------------------------- ++ get_role_cmd '"role-one"' ++ local 'role="role-one"' ++ cmd='(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' ++ echo '(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare admin '(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' 
userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-one + local database=admin + local 'command=(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-one + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qza9YGZ8sQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.udx6LWcDjO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qza9YGZ8sQ ++ cat /tmp/tmp.udx6LWcDjO ++ rm /tmp/tmp.Qza9YGZ8sQ /tmp/tmp.udx6LWcDjO ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.RgKIThRRgG ++ mktemp + local LAST_ERR=/tmp/tmp.oo03YK2vUD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); 
role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RgKIThRRgG
+ cat /tmp/tmp.oo03YK2vUD
+ rm /tmp/tmp.RgKIThRRgG /tmp/tmp.oo03YK2vUD
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-one.json /tmp/tmp.cR5mi0auxS/role-one
+ desc 'check role recreated after deleted from DB'
+ set +o xtrace
-----------------------------------------------------------------------------------
check role recreated after deleted from DB
-----------------------------------------------------------------------------------
+ run_mongos 'use admin\n db.dropRole("role-one")' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local 'command=use admin\n db.dropRole("role-one")'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HfKVmt864Z
+++ mktemp
++ local LAST_ERR=/tmp/tmp.INqGFZquQD
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.HfKVmt864Z
++ cat /tmp/tmp.INqGFZquQD
++ rm /tmp/tmp.HfKVmt864Z /tmp/tmp.INqGFZquQD
++ return 0
+ local client_container=psmdb-client-696897d69b-zwwz8
+ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.dropRole("role-one")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.YKQmCbgbuQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.erCqlWXstL
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.dropRole("role-one")\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YKQmCbgbuQ
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("12c6e37f-7663-4d37-ab11-80e60c2724e3") }
Percona Server for MongoDB server version: v8.0.16-5
WARNING: shell and server versions do not match
switched to db admin
true
bye
+ cat /tmp/tmp.erCqlWXstL
+ rm /tmp/tmp.YKQmCbgbuQ /tmp/tmp.erCqlWXstL
+ return 0
+ sleep 15
++ get_role_cmd '"role-one"'
++ local 'role="role-one"'
++ cmd='(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
++ echo '(function() { var role = db.getRole("role-one", {showPrivileges: true,
showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare admin '(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-one + local database=admin + local 'command=(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-one + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9duQUug02A +++ mktemp ++ local LAST_ERR=/tmp/tmp.KreA4a3zs5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9duQUug02A ++ cat /tmp/tmp.KreA4a3zs5 ++ rm /tmp/tmp.9duQUug02A /tmp/tmp.KreA4a3zs5 ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CjMXE1hK4L ++ mktemp + local LAST_ERR=/tmp/tmp.fKIoZMfAUV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CjMXE1hK4L + cat /tmp/tmp.fKIoZMfAUV + rm /tmp/tmp.CjMXE1hK4L /tmp/tmp.fKIoZMfAUV + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-one.json /tmp/tmp.cR5mi0auxS/role-one + desc 'delete initial role from CR and create a new one' + set +o xtrace ----------------------------------------------------------------------------------- delete initial role from CR and create a new one ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ], "roles": [ { "role": "read", "db": "admin" } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.3uyXb6Lwtj ++ mktemp + local LAST_ERR=/tmp/tmp.IPwXyJPE4G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ], "roles": [ { "role": "read", "db": "admin" } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3uyXb6Lwtj perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.IPwXyJPE4G + rm /tmp/tmp.3uyXb6Lwtj /tmp/tmp.IPwXyJPE4G + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uhhs2Ysmls +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZrE57MRcoA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uhhs2Ysmls ++ cat /tmp/tmp.ZrE57MRcoA ++ rm /tmp/tmp.uhhs2Ysmls /tmp/tmp.ZrE57MRcoA ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set 
+o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXCYD3Rtjp +++ mktemp ++ local LAST_ERR=/tmp/tmp.GyJfFmykvQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XXCYD3Rtjp ++ cat /tmp/tmp.GyJfFmykvQ ++ rm /tmp/tmp.XXCYD3Rtjp /tmp/tmp.GyJfFmykvQ ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.85iaf5X3gX +++ mktemp ++ local LAST_ERR=/tmp/tmp.zpZ9e2zWNu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.85iaf5X3gX ++ cat /tmp/tmp.zpZ9e2zWNu ++ rm /tmp/tmp.85iaf5X3gX /tmp/tmp.zpZ9e2zWNu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_role_cmd '"role-one"' ++ local 'role="role-one"' ++ cmd='(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' ++ echo '(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare admin '(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-one + local database=admin + local 'command=(function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-one + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var 
roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ymd2vIs09n +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZDhjpNWJyY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ymd2vIs09n ++ cat /tmp/tmp.ZDhjpNWJyY ++ rm /tmp/tmp.ymd2vIs09n /tmp/tmp.ZDhjpNWJyY ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.dkpHkWsSP4 ++ mktemp + local LAST_ERR=/tmp/tmp.yBI4w6bzWi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dkpHkWsSP4 + cat /tmp/tmp.yBI4w6bzWi + rm /tmp/tmp.dkpHkWsSP4 /tmp/tmp.yBI4w6bzWi + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-one.json /tmp/tmp.cR5mi0auxS/role-one ++ get_role_cmd '"role-two"' ++ local 'role="role-two"' ++ cmd='(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' ++ echo '(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare admin '(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > 
b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-two + local database=admin + local 'command=(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-two + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CJwgsDxURP +++ mktemp ++ local LAST_ERR=/tmp/tmp.B2Ec6Ce4Js ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CJwgsDxURP ++ cat /tmp/tmp.B2Ec6Ce4Js ++ rm /tmp/tmp.CJwgsDxURP /tmp/tmp.B2Ec6Ce4Js ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.k57lx0irCG ++ mktemp + local LAST_ERR=/tmp/tmp.Y6axFX2nMW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if 
(a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k57lx0irCG + cat /tmp/tmp.Y6axFX2nMW + rm /tmp/tmp.k57lx0irCG /tmp/tmp.Y6axFX2nMW + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-two.json /tmp/tmp.cR5mi0auxS/role-two + desc 'check role update from CR' + set +o xtrace ----------------------------------------------------------------------------------- check role update from CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.rCJ4jduoLL ++ mktemp + local LAST_ERR=/tmp/tmp.qpEPyWfu5Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-two", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rCJ4jduoLL perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.qpEPyWfu5Y + rm /tmp/tmp.rCJ4jduoLL /tmp/tmp.qpEPyWfu5Y + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AVLhed5juz +++ mktemp ++ local LAST_ERR=/tmp/tmp.WycQMHrzUq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AVLhed5juz ++ cat /tmp/tmp.WycQMHrzUq ++ rm /tmp/tmp.AVLhed5juz /tmp/tmp.WycQMHrzUq ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KbPzvFTh5q +++ mktemp ++ local LAST_ERR=/tmp/tmp.5BHlY9HUAT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KbPzvFTh5q ++ cat /tmp/tmp.5BHlY9HUAT ++ rm /tmp/tmp.KbPzvFTh5q /tmp/tmp.5BHlY9HUAT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name 
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.AVLhed5juz
+++ mktemp
++ local LAST_ERR=/tmp/tmp.WycQMHrzUq
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.AVLhed5juz
++ cat /tmp/tmp.WycQMHrzUq
++ rm /tmp/tmp.AVLhed5juz /tmp/tmp.WycQMHrzUq
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.KbPzvFTh5q
+++ mktemp
++ local LAST_ERR=/tmp/tmp.5BHlY9HUAT
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.KbPzvFTh5q
++ cat /tmp/tmp.5BHlY9HUAT
++ rm /tmp/tmp.KbPzvFTh5q /tmp/tmp.5BHlY9HUAT
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.hhBmMfnnO3
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Jas0YlIird
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.hhBmMfnnO3
++ cat /tmp/tmp.Jas0YlIird
++ rm /tmp/tmp.hhBmMfnnO3 /tmp/tmp.Jas0YlIird
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readiness
++ get_role_cmd '"role-two"'
++ local 'role="role-two"'
++ cmd='(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
++ echo '(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
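Note: get_role_cmd above generates the mongo shell snippet used for the comparison that follows. Reflowed for readability (same snippet as in the trace), it fetches the role with its privileges and authentication restrictions and sorts the inherited roles array, so the printjson output can be diffed against a stored fixture regardless of the order the server returns roles in:

    (function() {
      var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true});
      var roles = role.roles;
      roles.sort((a, b) => {
        if (a.role < b.role) return -1;
        if (a.role > b.role) return 1;
        return 0;
      });
      role.roles = roles;
      printjson(role);
    })();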
+ compare admin '(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-two-updated
+ local database=admin
+ local 'command=(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local target=role-two-updated
+ [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]]
+ run_mongos 'use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ local 'command=use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/'
+ sed '/"userId"/d'
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
egrep: warning: egrep is obsolescent; using grep -E
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.dbggL1XU0J
+++ mktemp
++ local LAST_ERR=/tmp/tmp.y9Vk3IJZCg
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.dbggL1XU0J
++ cat /tmp/tmp.y9Vk3IJZCg
++ rm /tmp/tmp.dbggL1XU0J /tmp/tmp.y9Vk3IJZCg
++ return 0
+ local client_container=psmdb-client-696897d69b-zwwz8
+ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.Ws4FPn0JRO
++ mktemp
+ local LAST_ERR=/tmp/tmp.AEKqwdQ0jM
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Ws4FPn0JRO
+ cat /tmp/tmp.AEKqwdQ0jM
+ rm /tmp/tmp.Ws4FPn0JRO /tmp/tmp.AEKqwdQ0jM
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-two-updated.json /tmp/tmp.cR5mi0auxS/role-two-updated
+ desc 'check role update from DB'
+ set +o xtrace
-----------------------------------------------------------------------------------
check role update from DB
-----------------------------------------------------------------------------------
+ run_mongos 'use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local 'command=use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local mongo_flag=
+ local port=27017
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27017
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.eJYX0f40zv
+++ mktemp
++ local LAST_ERR=/tmp/tmp.LDERPPWnIM
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.eJYX0f40zv
++ cat /tmp/tmp.LDERPPWnIM
++ rm /tmp/tmp.eJYX0f40zv /tmp/tmp.LDERPPWnIM
++ return 0
+ local client_container=psmdb-client-696897d69b-zwwz8
+ kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
++ mktemp
+ local LAST_OUT=/tmp/tmp.UkSkyNBA88
++ mktemp
+ local LAST_ERR=/tmp/tmp.kdUMWnDE8V
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.UkSkyNBA88
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("6678eb10-94a5-4bd7-983c-b0b70d3b372f") }
Percona Server for MongoDB server version: v8.0.16-5
WARNING: shell and server versions do not match
switched to db admin
bye
+ cat /tmp/tmp.kdUMWnDE8V
+ rm /tmp/tmp.UkSkyNBA88 /tmp/tmp.kdUMWnDE8V
+ return 0
+ sleep 15
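Note: the db.updateRole() call above modifies role-two directly in MongoDB, adding the update action on top of the find action defined in the CR. Reflowed, the statement piped through mongos was:

    use admin
    db.updateRole("role-two", {
      privileges: [
        { resource: { db: "config", collection: "" }, actions: ["find", "update"] }
      ]
    })

The 15-second sleep gives the operator a reconcile cycle; the check that follows diffs against role-two-updated.json again, which suggests the manual change is expected to be reverted back to the CR-defined privileges.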
++ get_role_cmd '"role-two"'
++ local 'role="role-two"'
++ cmd='(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
++ echo '(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
+ compare admin '(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-two-updated
+ local database=admin
+ local 'command=(function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();'
+ local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269
+ local target=role-two-updated
+ [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]]
+ run_mongos 'use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit
session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bue4WUjTnJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kmZ4NPxlSs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bue4WUjTnJ ++ cat /tmp/tmp.kmZ4NPxlSs ++ rm /tmp/tmp.bue4WUjTnJ /tmp/tmp.kmZ4NPxlSs ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.v8UBG34ooO ++ mktemp + local LAST_ERR=/tmp/tmp.XNvmWUD0BT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v8UBG34ooO + cat /tmp/tmp.XNvmWUD0BT + rm /tmp/tmp.v8UBG34ooO /tmp/tmp.XNvmWUD0BT + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-two-updated.json /tmp/tmp.cR5mi0auxS/role-two-updated + desc 'check new role created after updated role name via CR' + set +o xtrace ----------------------------------------------------------------------------------- check new role created after updated role name via CR ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-three", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' ++ mktemp + local LAST_OUT=/tmp/tmp.q8xCzCjrSd ++ mktemp + local LAST_ERR=/tmp/tmp.QQGL2ufqhY + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"roles":[ { "role": "role-three", "db": "admin", "privileges": [ { "resource": { "db": "config", "collection": "" }, "actions": [ "find" ] } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q8xCzCjrSd perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.QQGL2ufqhY + rm /tmp/tmp.q8xCzCjrSd /tmp/tmp.QQGL2ufqhY + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kw9it9ZYIE +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZJHwwCe5oK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kw9it9ZYIE ++ cat /tmp/tmp.ZJHwwCe5oK ++ rm /tmp/tmp.kw9it9ZYIE /tmp/tmp.ZJHwwCe5oK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JWLwzgsRgg +++ mktemp ++ local LAST_ERR=/tmp/tmp.dBKUwAOXki ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JWLwzgsRgg ++ cat /tmp/tmp.dBKUwAOXki ++ rm /tmp/tmp.JWLwzgsRgg /tmp/tmp.dBKUwAOXki ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o26ngsQSbj +++ mktemp ++ local LAST_ERR=/tmp/tmp.dHkeoDk34M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o26ngsQSbj ++ cat /tmp/tmp.dHkeoDk34M ++ rm /tmp/tmp.o26ngsQSbj /tmp/tmp.dHkeoDk34M ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_role_cmd '"role-three"' ++ local 'role="role-three"' ++ cmd='(function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' ++ echo '(function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, 
b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare admin '(function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-three + local database=admin + local 'command=(function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-three + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use admin\n (function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use admin\n (function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lXNlLZCFLi +++ mktemp ++ local LAST_ERR=/tmp/tmp.SkKCegBwwr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lXNlLZCFLi ++ cat /tmp/tmp.SkKCegBwwr ++ rm /tmp/tmp.lXNlLZCFLi /tmp/tmp.SkKCegBwwr ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0YHNwVrAjn ++ mktemp + 
local LAST_ERR=/tmp/tmp.RoOinBzCrg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use admin\n (function() { var role = db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0YHNwVrAjn
+ cat /tmp/tmp.RoOinBzCrg
+ rm /tmp/tmp.0YHNwVrAjn /tmp/tmp.RoOinBzCrg
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-three.json /tmp/tmp.cR5mi0auxS/role-three
+ desc 'check creating multiple roles and users in a single CR apply'
+ set +o xtrace
-----------------------------------------------------------------------------------
check creating multiple roles and users in a single CR apply
-----------------------------------------------------------------------------------
+ kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": { "roles": [ { "role": "role-four", "db": "testAdmin1", "privileges": [ { "resource": { "db": "testAdmin1", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin1", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin1", "collection": "system.version" }, "actions": [ "find" ] } ] }, { "role": "role-five", "db": "testAdmin2", "privileges": [ { "resource": { "db": "testAdmin2", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin2", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin2", "collection": "system.version" }, "actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", "db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}'
++ mktemp
+ local LAST_OUT=/tmp/tmp.ZltXmmbJWi
++ mktemp
+ local LAST_ERR=/tmp/tmp.uGVlmfKGDA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl patch psmdb some-name --type=merge --patch '{ "spec": { "roles": [ { "role": "role-four", "db": "testAdmin1", "privileges": [ { "resource": { "db": "testAdmin1", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin1", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin1", "collection": "system.version" }, "actions": [ "find" ] } ] }, { "role": "role-five", "db": "testAdmin2", "privileges": [ { "resource": { "db": "testAdmin2", "collection": "" }, "actions": [ "find", "listIndexes", "listCollections" ] }, { "resource": { "db": "testAdmin2", "collection": "system.profile" }, "actions": [ "dbStats", "collStats", "indexStats" ] }, { "resource": { "db": "testAdmin2", "collection": "system.version" }, "actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", "db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ZltXmmbJWi
perconaservermongodb.psmdb.percona.com/some-name patched
+ cat /tmp/tmp.uGVlmfKGDA
+ rm /tmp/tmp.ZltXmmbJWi /tmp/tmp.uGVlmfKGDA
+ return 0
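Note: the patch above creates two roles (role-four in testAdmin1, role-five in testAdmin2) and two users in a single CR update. Both users draw their password from the same Kubernetes Secret through passwordSecretRef (secret user-one, key userOnePassKey); user-five is granted both roles, user-six only role-five. The users portion of the patch, reflowed for readability (same content as in the trace):

    "users": [
      {
        "name": "user-five",
        "db": "testAdmin",
        "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" },
        "roles": [
          { "name": "role-four", "db": "testAdmin1" },
          { "name": "role-five", "db": "testAdmin2" }
        ]
      },
      {
        "name": "user-six",
        "db": "testAdmin",
        "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" },
        "roles": [
          { "name": "role-five", "db": "testAdmin2" }
        ]
      }
    ]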
"actions": [ "find" ] } ] } ], "users": [ { "name": "user-five", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-four", "db": "testAdmin1" }, { "name": "role-five", "db": "testAdmin2" } ] }, { "name": "user-six", "db": "testAdmin", "passwordSecretRef": { "name": "user-one", "key": "userOnePassKey" }, "roles": [ { "name": "role-five", "db": "testAdmin2" } ] } ] }}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZltXmmbJWi perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.uGVlmfKGDA + rm /tmp/tmp.ZltXmmbJWi /tmp/tmp.uGVlmfKGDA + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7LGJdhSbp3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zwRcf70JTi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7LGJdhSbp3 ++ cat /tmp/tmp.zwRcf70JTi ++ rm /tmp/tmp.7LGJdhSbp3 /tmp/tmp.zwRcf70JTi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rZkvFpj9Pz +++ mktemp ++ local LAST_ERR=/tmp/tmp.QkvLAUAGOT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rZkvFpj9Pz ++ cat /tmp/tmp.QkvLAUAGOT ++ rm /tmp/tmp.rZkvFpj9Pz /tmp/tmp.QkvLAUAGOT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZdlwoVc8FM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xh6tWIJUAB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZdlwoVc8FM ++ cat /tmp/tmp.Xh6tWIJUAB ++ rm /tmp/tmp.ZdlwoVc8FM /tmp/tmp.Xh6tWIJUAB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness ++ get_role_cmd '"role-four"' ++ local 'role="role-four"' ++ cmd='(function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' ++ echo 
'(function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare testAdmin1 '(function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-four + local database=testAdmin1 + local 'command=(function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-four + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use testAdmin1\n (function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use testAdmin1\n (function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ awk -F: '{print $2}' ++ echo .svc.cluster.local egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dVtSaxtdaQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.b8GcHEY6Bl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dVtSaxtdaQ ++ cat /tmp/tmp.b8GcHEY6Bl ++ rm /tmp/tmp.dVtSaxtdaQ /tmp/tmp.b8GcHEY6Bl ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin1\n (function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo 
mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.OcjLBhLGal ++ mktemp + local LAST_ERR=/tmp/tmp.3wupHRSQ96 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin1\n (function() { var role = db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OcjLBhLGal + cat /tmp/tmp.3wupHRSQ96 + rm /tmp/tmp.OcjLBhLGal /tmp/tmp.3wupHRSQ96 + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-four.json /tmp/tmp.cR5mi0auxS/role-four ++ get_role_cmd '"role-five"' ++ local 'role="role-five"' ++ cmd='(function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' ++ echo '(function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + compare testAdmin2 '(function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 role-five + local database=testAdmin2 + local 'command=(function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=role-five + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use testAdmin2\n (function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use testAdmin2\n (function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history 
file:' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oPKsStFIOy +++ mktemp ++ local LAST_ERR=/tmp/tmp.GV5fOsQkfA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oPKsStFIOy ++ cat /tmp/tmp.GV5fOsQkfA ++ rm /tmp/tmp.oPKsStFIOy /tmp/tmp.GV5fOsQkfA ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin2\n (function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.RlNnnV3yN5 ++ mktemp + local LAST_ERR=/tmp/tmp.5lVlPEuw2R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin2\n (function() { var role = db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true}); var roles = role.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); role.roles = roles; printjson(role); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RlNnnV3yN5 + cat /tmp/tmp.5lVlPEuw2R + rm /tmp/tmp.RlNnnV3yN5 /tmp/tmp.5lVlPEuw2R + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/role-five.json /tmp/tmp.cR5mi0auxS/role-five ++ get_user_cmd '"user-five"' ++ local 'user="user-five"' ++ cmd='(function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare testAdmin '(function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-five + local database=testAdmin + local 'command=(function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < 
b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-five + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use testAdmin\n (function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + local 'command=use testAdmin\n (function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D6IBW6IDnm +++ mktemp ++ local LAST_ERR=/tmp/tmp.PFCk8Sw1VN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D6IBW6IDnm ++ cat /tmp/tmp.PFCk8Sw1VN ++ rm /tmp/tmp.D6IBW6IDnm /tmp/tmp.PFCk8Sw1VN ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin\n (function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.A5nDHZn3Ku ++ mktemp + local LAST_ERR=/tmp/tmp.5iDM497hsG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin\n (function() { var user = db.getUser("user-five"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A5nDHZn3Ku + cat /tmp/tmp.5iDM497hsG + rm /tmp/tmp.A5nDHZn3Ku /tmp/tmp.5iDM497hsG + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-five.json /tmp/tmp.cR5mi0auxS/user-five ++ get_user_cmd '"user-six"' ++ local 
'user="user-six"' ++ cmd='(function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' ++ echo '(function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + compare testAdmin '(function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 user-six + local database=testAdmin + local 'command=(function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local target=user-six + [[ perconalab/percona-server-mongodb-operator:main-mongod8.0 =~ 5\.0 ]] + run_mongos 'use testAdmin\n (function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use testAdmin\n (function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();' + local uri=userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' + sed '/"userId"/d' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5wFsb2Z8Zg +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hxz2Vp6AYX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5wFsb2Z8Zg ++ cat /tmp/tmp.Hxz2Vp6AYX ++ rm /tmp/tmp.5wFsb2Z8Zg /tmp/tmp.Hxz2Vp6AYX ++ return 0 + local client_container=psmdb-client-696897d69b-zwwz8 + kubectl_bin exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin\n (function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.RKwPQJ0ZZg ++ mktemp + local LAST_ERR=/tmp/tmp.Beas1aME9X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-zwwz8 -- bash -c 'printf '\''use testAdmin\n (function() { var user = db.getUser("user-six"); var roles = user.roles; roles.sort((a, b) => { if (a.role < b.role) return -1; if (a.role > b.role) return 1; return 0; }); user.roles = roles; printjson(user); })();\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.custom-users-roles-sharded-29269.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RKwPQJ0ZZg + cat /tmp/tmp.Beas1aME9X + rm /tmp/tmp.RKwPQJ0ZZg /tmp/tmp.Beas1aME9X + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/custom-users-roles-sharded/compare/user-six.json /tmp/tmp.cR5mi0auxS/user-six + destroy custom-users-roles-sharded-29269 + local namespace=custom-users-roles-sharded-29269 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.VEncjNZo46 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iz2U9pFftj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VEncjNZo46 ++ cat /tmp/tmp.iz2U9pFftj No resources found in custom-users-roles-sharded-29269 namespace. 
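Note: as the empty listing above shows, delete_backups first counts psmdb-backup resources and only cleans up when the count is non-zero. A minimal sketch of that guard, assuming plain kubectl; the deletion command itself is hypothetical, since this run finds nothing to delete:

    # count backup custom resources in the current namespace
    backups=$(kubectl get psmdb-backup --no-headers | wc -l)
    if [ "$backups" != 0 ]; then
        # hypothetical cleanup step, skipped in this run because the count is 0
        kubectl delete psmdb-backup --all
    fi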
++ rm /tmp/tmp.VEncjNZo46 /tmp/tmp.iz2U9pFftj ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.VEIfJZ7ytw ++ mktemp + local LAST_ERR=/tmp/tmp.Vg7LsJwmkj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VEIfJZ7ytw customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Vg7LsJwmkj + rm /tmp/tmp.VEIfJZ7ytw /tmp/tmp.Vg7LsJwmkj + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.mIW6SiZaUQ ++ mktemp + local LAST_ERR=/tmp/tmp.3VZVHrXkzf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mIW6SiZaUQ + cat /tmp/tmp.3VZVHrXkzf + rm /tmp/tmp.mIW6SiZaUQ /tmp/tmp.3VZVHrXkzf + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4ctIxgCtbL ++ mktemp + local LAST_ERR=/tmp/tmp.OFkjuC3ucS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break
+ cat /tmp/tmp.4ctIxgCtbL
+ cat /tmp/tmp.OFkjuC3ucS
+ rm /tmp/tmp.4ctIxgCtbL /tmp/tmp.OFkjuC3ucS
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.oRInlDDNfx
++ mktemp
+ local LAST_ERR=/tmp/tmp.YsHo424IBJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.oRInlDDNfx
+ cat /tmp/tmp.YsHo424IBJ
+ rm /tmp/tmp.oRInlDDNfx /tmp/tmp.YsHo424IBJ
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.oQxwuy9ppw
++ mktemp
+ local LAST_ERR=/tmp/tmp.huhSjmMdBN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.oQxwuy9ppw
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.huhSjmMdBN
+ rm /tmp/tmp.oQxwuy9ppw /tmp/tmp.huhSjmMdBN
+ return 0
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.7bjom09Pvd + cat /tmp/tmp.IuCRBfsl0c Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.7bjom09Pvd + cat 
+ sleep 8
+ cat /tmp/tmp.7bjom09Pvd
+ cat /tmp/tmp.IuCRBfsl0c
+ rm /tmp/tmp.7bjom09Pvd /tmp/tmp.IuCRBfsl0c
+ return 1
+ true
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.7bjom09Pvd /tmp/tmp.IuCRBfsl0c + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace custom-users-roles-sharded-29269 + rm -rf /tmp/tmp.cR5mi0auxS + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.sRPxc4miMQ + local LAST_OUT=/tmp/tmp.5szCKTGmfG + desc 'test passed' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_ERR=/tmp/tmp.mTGicVdWWg + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.HIzNuDovoT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace custom-users-roles-sharded-29269 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator