Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/limits.log grep: warning: stray \ before - WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + main + create_infra limits-15700 + local ns=limits-15700 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.5lSx7lePYd ++ mktemp + local LAST_ERR=/tmp/tmp.UXdoTDl0pv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5lSx7lePYd customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.UXdoTDl0pv + rm /tmp/tmp.5lSx7lePYd /tmp/tmp.UXdoTDl0pv + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.8ACcbay9Qy ++ mktemp + local LAST_ERR=/tmp/tmp.biLOtGggby + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ACcbay9Qy + cat /tmp/tmp.biLOtGggby + rm /tmp/tmp.8ACcbay9Qy /tmp/tmp.biLOtGggby + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the 
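Nearly every kubectl call above goes through the harness's kubectl_bin wrapper, which is why the trace keeps repeating the same mktemp / seq 0 2 / set +e pattern: output is captured to temp files and the command is retried up to three times with a growing back-off. A minimal sketch of that wrapper, reconstructed from the xtrace output (simplified; the real helper may differ in details):

    # reconstructed from the xtrace above; not the verbatim harness code
    kubectl_bin() {
        local LAST_OUT LAST_ERR
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        local exit_status=0
        local timeout=4
        local i
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"   # capture both streams per attempt
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                # failed attempt: show what happened, back off (0s, 4s, 8s), retry
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i))
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }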
server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.sZvpZKPEm8 ++ mktemp + local LAST_ERR=/tmp/tmp.LVsxHoycWl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sZvpZKPEm8 + cat /tmp/tmp.LVsxHoycWl + rm /tmp/tmp.sZvpZKPEm8 /tmp/tmp.LVsxHoycWl + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3wEnRpgSsM ++ mktemp + local LAST_ERR=/tmp/tmp.KQODvpV8ow + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3wEnRpgSsM + cat /tmp/tmp.KQODvpV8ow + rm /tmp/tmp.3wEnRpgSsM /tmp/tmp.KQODvpV8ow + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ZMFiEt9FZy ++ mktemp + local LAST_ERR=/tmp/tmp.hLEn2oS05u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZMFiEt9FZy clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted serviceaccount "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.hLEn2oS05u + rm /tmp/tmp.ZMFiEt9FZy /tmp/tmp.hLEn2oS05u + return 0 + check_crd_for_deletion PR-1961-f6beb261 + local git_tag=PR-1961-f6beb261 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1961-f6beb261/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xw7EXec2EG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qu25ggeYOI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xw7EXec2EG ++ cat /tmp/tmp.Qu25ggeYOI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" 
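delete_crd handles each CRD listed in deploy/crd.yaml the same way: it strips finalizers from any leftover custom resources so the CRD deletion started above can complete, then waits for the CRD to disappear. The repeated "the server doesn't have a resource type" errors are expected here because the CRDs are already gone; the trailing ':' no-op (traced as '+ :') swallows them. A sketch of that per-CRD loop, following the structure visible in the trace:

    # reconstructed from the xtrace above; not the verbatim harness code
    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
        # clear finalizers on every remaining CR of this type in all namespaces,
        # ignoring errors when the resource type no longer exists
        kubectl get "$crd_name" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc 'kubectl patch '"$crd_name"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
            || :
        kubectl_bin wait --for=delete crd "$crd_name"
    done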
not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xw7EXec2EG ++ cat /tmp/tmp.Qu25ggeYOI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xw7EXec2EG ++ cat /tmp/tmp.Qu25ggeYOI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.xw7EXec2EG ++ cat /tmp/tmp.Qu25ggeYOI Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.xw7EXec2EG /tmp/tmp.Qu25ggeYOI ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.sFjEt4DJYI egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local 
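destroy_chaos_mesh is a best-effort sweep: it looks for a chaos-mesh Helm release, then deletes any chaos-mesh webhooks, CRDs, clusterroles and bindings it can find. Each "error: resource(s) were provided, but no name was specified" above simply means the grep matched nothing, so kubectl delete received an empty name list; the '|| :' keeps that from failing the test. A sketch under those assumptions (the helm uninstall branch is not exercised in this run because no release was found):

    # reconstructed from the xtrace above; not the verbatim harness code
    destroy_chaos_mesh() {
        local chaos_mesh_ns
        chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh | tail -n1 | awk -F' ' '{print $2}' | sed s/NAMESPACE//)
        if [ -n "$chaos_mesh_ns" ]; then
            helm uninstall chaos-mesh --namespace "$chaos_mesh_ns" || :
        fi
        # cluster-scoped leftovers; each delete tolerates an empty match list
        timeout 30 kubectl delete MutatingWebhookConfiguration $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :
        timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :
        timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep validate-auth | awk '{print $1}') || :
        timeout 30 kubectl delete crd $(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
        timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep chaos-mesh | awk '{print $1}') || :
        timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}') || :
    }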
LAST_OUT=/tmp/tmp.NWrR3swmGY ++ mktemp + local LAST_ERR=/tmp/tmp.aqcZ3aRSo4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.ZU6XJ2ee47 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sFjEt4DJYI + cat /tmp/tmp.aqcZ3aRSo4 + rm /tmp/tmp.sFjEt4DJYI /tmp/tmp.aqcZ3aRSo4 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NWrR3swmGY namespace "psmdb-operator" deleted + cat /tmp/tmp.ZU6XJ2ee47 + rm /tmp/tmp.NWrR3swmGY /tmp/tmp.ZU6XJ2ee47 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Y5ANRW6uY4 ++ mktemp + local LAST_ERR=/tmp/tmp.qpJ93b67Jx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y5ANRW6uY4 + cat /tmp/tmp.qpJ93b67Jx + rm /tmp/tmp.Y5ANRW6uY4 /tmp/tmp.qpJ93b67Jx + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.oiOpODmuuJ ++ mktemp + local LAST_ERR=/tmp/tmp.sHoZVRaRFX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oiOpODmuuJ namespace/psmdb-operator created + cat /tmp/tmp.sHoZVRaRFX + rm /tmp/tmp.oiOpODmuuJ /tmp/tmp.sHoZVRaRFX + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nxv1ybBTYu +++ mktemp ++ local LAST_ERR=/tmp/tmp.IdVmvO5vdJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nxv1ybBTYu ++ cat /tmp/tmp.IdVmvO5vdJ ++ rm /tmp/tmp.nxv1ybBTYu /tmp/tmp.IdVmvO5vdJ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster10 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.1J58xtgPQM ++ mktemp + local LAST_ERR=/tmp/tmp.0A1V8DtdiR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster10 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1J58xtgPQM Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster10" modified. 
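set_kube_ctx simply pins the active kubeconfig context to the namespace that was just created, so later kubectl calls don't need an explicit -n flag. Roughly, based on the two config calls traced above:

    # reconstructed from the xtrace above; not the verbatim harness code
    set_kube_ctx() {
        local namespace=$1
        kubectl_bin config set-context "$(kubectl_bin config current-context)" --namespace="$namespace"
    }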
+ cat /tmp/tmp.0A1V8DtdiR + rm /tmp/tmp.1J58xtgPQM /tmp/tmp.0A1V8DtdiR + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/limits/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Ho5FHpAHD0 ++ mktemp + local LAST_ERR=/tmp/tmp.XVwBIBMDCZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ho5FHpAHD0 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.XVwBIBMDCZ + rm /tmp/tmp.Ho5FHpAHD0 /tmp/tmp.XVwBIBMDCZ + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.V7NABGJpG7 ++ mktemp + local LAST_ERR=/tmp/tmp.n8A7uQuzKT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.V7NABGJpG7 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.n8A7uQuzKT + rm /tmp/tmp.V7NABGJpG7 /tmp/tmp.n8A7uQuzKT + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1961-f6beb261") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
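apply_rbac is what turns the stock cw-rbac.yaml into one scoped to this run: the namespace in the manifest is rewritten with sed before it is piped to kubectl. A sketch of that helper (OPERATOR_NS is an assumed variable name; the trace shows the literal psmdb-operator):

    # reconstructed from the xtrace above; not the verbatim harness code
    apply_rbac() {
        local rbac=$1   # rbac or cw-rbac, depending on operator scope
        cat "${src_dir}/deploy/${rbac}.yaml" \
            | sed -e "s^namespace: .*^namespace: ${OPERATOR_NS}^" \
            | kubectl_bin apply -n "${OPERATOR_NS}" -f -
    }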
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.bdPd7si5no ++ mktemp + local LAST_ERR=/tmp/tmp.mbtotm1eFL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bdPd7si5no deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.mbtotm1eFL + rm /tmp/tmp.bdPd7si5no /tmp/tmp.mbtotm1eFL + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.m4AbACI4JK +++ mktemp ++ local LAST_ERR=/tmp/tmp.x6REZQQVMO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m4AbACI4JK ++ cat /tmp/tmp.x6REZQQVMO ++ rm /tmp/tmp.m4AbACI4JK /tmp/tmp.x6REZQQVMO ++ return 0 + wait_pod percona-server-mongodb-operator-5d9dc9cbbc-vxk9r + local pod=percona-server-mongodb-operator-5d9dc9cbbc-vxk9r + set +o xtrace waiting for pod/percona-server-mongodb-operator-5d9dc9cbbc-vxk9r to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.j44WUo5TcX +++ mktemp ++ local LAST_ERR=/tmp/tmp.amfzDyaJFG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j44WUo5TcX ++ cat /tmp/tmp.amfzDyaJFG ++ rm /tmp/tmp.j44WUo5TcX /tmp/tmp.amfzDyaJFG ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-5d9dc9cbbc-vxk9r ++ mktemp + local LAST_OUT=/tmp/tmp.MdwNXcFv9Y ++ mktemp + local LAST_ERR=/tmp/tmp.OqLPykgsC1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-5d9dc9cbbc-vxk9r + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MdwNXcFv9Y + cat /tmp/tmp.OqLPykgsC1 + rm /tmp/tmp.MdwNXcFv9Y /tmp/tmp.OqLPykgsC1 + return 0 2025-08-14T11:33:35.181Z INFO setup Manager starting up {"gitCommit": "f6beb261109d64229faebd29fd920425a39c54e3", "gitBranch": "PR-1961-f6beb261", "buildTime": "", "goVersion": "go1.24.6", "os": "linux", "arch": "amd64"} + create_namespace limits-15700 + local namespace=limits-15700 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were 
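With the CRDs, RBAC and the yq-customized Deployment applied (PR image, DISABLE_TELEMETRY=true, LOG_LEVEL=DEBUG), the harness resolves the operator pod by label, waits for it, and greps its log for the startup line. A sketch of that step, reconstructed from the jsonpath query above (the namespace default is an assumption):

    # reconstructed from the xtrace above; not the verbatim harness code
    get_operator_pod() {
        kubectl_bin get pods \
            --selector=name=percona-server-mongodb-operator \
            -o 'jsonpath={.items[].metadata.name}' \
            -n "${OPERATOR_NS:-psmdb-operator}"   # namespace variable name assumed
    }

    wait_pod "$(get_operator_pod)"
    echo 'Print operator info from log'
    kubectl_bin logs "$(get_operator_pod)" | grep 'Manager starting up'

That grep is what surfaces the "Manager starting up" line above, confirming the pod runs commit f6beb261 from branch PR-1961-f6beb261.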
provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces limits-15700' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces limits-15700 ----------------------------------------------------------------------------------- + xargs kubectl delete ns + kubectl_bin delete namespace limits-15700 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.hwfOL7gy37 ++ mktemp + local LAST_OUT=/tmp/tmp.TFVHavWeYN egrep: warning: egrep is obsolescent; using grep -E + local LAST_ERR=/tmp/tmp.hzFa3dRV1o + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.kEGTIlYdxF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace limits-15700 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hwfOL7gy37 + cat /tmp/tmp.hzFa3dRV1o + rm /tmp/tmp.hwfOL7gy37 /tmp/tmp.hzFa3dRV1o + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TFVHavWeYN + cat /tmp/tmp.kEGTIlYdxF + rm /tmp/tmp.TFVHavWeYN /tmp/tmp.kEGTIlYdxF + return 0 + kubectl_bin wait --for=delete namespace limits-15700 ++ mktemp + local LAST_OUT=/tmp/tmp.rcfkW3v3vl ++ mktemp + local LAST_ERR=/tmp/tmp.fVGmAS4wSa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace limits-15700 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rcfkW3v3vl + cat /tmp/tmp.fVGmAS4wSa + rm /tmp/tmp.rcfkW3v3vl /tmp/tmp.fVGmAS4wSa + return 0 + desc 'create namespace limits-15700' + set +o xtrace ----------------------------------------------------------------------------------- create namespace limits-15700 ----------------------------------------------------------------------------------- + kubectl_bin create namespace limits-15700 ++ mktemp + local LAST_OUT=/tmp/tmp.wXjoSsIeCB ++ mktemp + local LAST_ERR=/tmp/tmp.rBMLmopqx0 + local exit_status=0 + local timeout=4 ++ seq 0 2 
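create_namespace is the same routine for every test namespace, which is why the psmdb-operator and limits-15700 passages read almost identically: a chaos-mesh sweep, a bulk delete of stale namespaces, then delete/wait/create of the target namespace. The bulk cleanup pipeline appears to run alongside the targeted delete (their xtrace output interleaves above), and its lone "no name was specified" error just means nothing matched the filter. A sketch under those assumptions:

    # reconstructed from the xtrace above; not the verbatim harness code
    create_namespace() {
        local namespace=$1
        local skip_clean_namespace=${2:-}

        if [ -z "$skip_clean_namespace" ]; then
            destroy_chaos_mesh
            desc 'cleaned up all old namespaces'
            # delete anything that is not a system, GKE, or operator namespace;
            # seemingly backgrounded, given how its trace interleaves with the delete below
            kubectl_bin get ns \
                | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' \
                | awk '{print$1}' \
                | xargs kubectl delete ns &
        fi

        desc "cleaned up old namespaces $namespace"
        kubectl_bin delete namespace "$namespace" --ignore-not-found
        kubectl_bin wait --for=delete namespace "$namespace"
        desc "create namespace $namespace"
        kubectl_bin create namespace "$namespace"
    }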
+ for i in $(seq 0 2) + set +e + kubectl create namespace limits-15700 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wXjoSsIeCB namespace/limits-15700 created + cat /tmp/tmp.rBMLmopqx0 + rm /tmp/tmp.wXjoSsIeCB /tmp/tmp.rBMLmopqx0 + return 0 + set_kube_ctx limits-15700 + local namespace=limits-15700 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.LOI3jcotzm +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZAZMydrKRo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LOI3jcotzm ++ cat /tmp/tmp.ZAZMydrKRo ++ rm /tmp/tmp.LOI3jcotzm /tmp/tmp.ZAZMydrKRo ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster10 --namespace=limits-15700 ++ mktemp + local LAST_OUT=/tmp/tmp.yyUScxgQY0 ++ mktemp + local LAST_ERR=/tmp/tmp.Code6yd1Vo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster10 --namespace=limits-15700 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yyUScxgQY0 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-f6beb261-6-cluster10" modified. + cat /tmp/tmp.Code6yd1Vo + rm /tmp/tmp.yyUScxgQY0 /tmp/tmp.Code6yd1Vo + return 0 + desc 'create secrets' + set +o xtrace ----------------------------------------------------------------------------------- create secrets ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hqtiJtf8jm ++ mktemp + local LAST_ERR=/tmp/tmp.6iKX1QNpMh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hqtiJtf8jm secret/some-users created + cat /tmp/tmp.6iKX1QNpMh + rm /tmp/tmp.hqtiJtf8jm /tmp/tmp.6iKX1QNpMh + return 0 + desc 'check if possible to create cluster without CPU/Memory limits' + set +o xtrace ----------------------------------------------------------------------------------- check if possible to create cluster without CPU/Memory limits ----------------------------------------------------------------------------------- + check_cr_config no-limits-rs0 + local cluster=no-limits-rs0 + desc 'create PSMDB cluster no-limits-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster no-limits-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/limits/conf/no-limits-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/limits/conf/no-limits-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/limits/conf/no-limits-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = 
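apply_cluster pipes the test's CR manifest through cat_config, which swaps in the images for this build before the spec ever reaches kubectl. A sketch of that pipeline using the image strings taken verbatim from the trace (the function names appear in the trace; their bodies here are reconstructions):

    # reconstructed from the xtrace above; not the verbatim harness code
    cat_config() {
        cat "$1" \
            | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
            | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
            | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-f6beb261"' \
            | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
            | yq eval '.spec.upgradeOptions.apply="Never"'
    }

    apply_cluster() {
        cat_config "$1" | kubectl_bin apply -f -
    }

Since no-limits-rs0.yml sets no image, the first yq rule is what injects main-mongod7.0 into the cluster spec.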
"perconalab/percona-server-mongodb-operator:PR-1961-f6beb261"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.1BAadPwmuj ++ mktemp + local LAST_ERR=/tmp/tmp.aMqrI3fwQ8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1BAadPwmuj perconaservermongodb.psmdb.percona.com/no-limits created + cat /tmp/tmp.aMqrI3fwQ8 + rm /tmp/tmp.1BAadPwmuj /tmp/tmp.aMqrI3fwQ8 + return 0 + desc 'check if at least 1 Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if at least 1 Pod started ----------------------------------------------------------------------------------- + wait_for_running no-limits-rs0 1 false + local name=no-limits-rs0 + let last_pod=0 + : + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=no-limits ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb no-limits -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xkxnODq5Kl +++ mktemp ++ local LAST_ERR=/tmp/tmp.DoiiNXxpBK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb no-limits -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xkxnODq5Kl ++ cat /tmp/tmp.DoiiNXxpBK ++ rm /tmp/tmp.xkxnODq5Kl /tmp/tmp.DoiiNXxpBK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod no-limits-rs0-0 + local pod=no-limits-rs0-0 + set +o xtrace waiting for pod/no-limits-rs0-0 to be ready.........Terminated