++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/logs/one-pod.log'
Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/logs/one-pod.log
++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/cloud-secret.yml ']'
++ SKIP_BACKUPS_TO_AWS_GCP_AZURE=
++ oc get projects
++ kubectl get nodes
++ grep '^minikube'
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
+++ grep -eks-
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
++ '[' ']'
++ EKS=0
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
+++ grep gke
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
++ '[' v1.32.12-gke.1127000 ']'
++ GKE=1
+++ kubectl version -o json
+++ jq -r '.serverVersion.major + "." + .serverVersion.minor'
+++ /usr/sbin/sed -r 's/[^0-9.]+//g'
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
++ KUBE_VERSION=1.32
+ main
+ create_infra one-pod-6268
+ local ns=one-pod-6268
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.YDWtJ9rcKe
++ mktemp
+ local LAST_ERR=/tmp/tmp.Vd7U6BgdIT
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YDWtJ9rcKe
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.Vd7U6BgdIT
+ rm /tmp/tmp.YDWtJ9rcKe /tmp/tmp.Vd7U6BgdIT
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.jHWNXF4fzp
++ mktemp
+ local LAST_ERR=/tmp/tmp.0luGTgTIqQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jHWNXF4fzp
+ cat
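Every kubectl_bin trace above expands the same wrapper: capture stdout/stderr into mktemp files, retry up to three times with a growing sleep, then replay the captured output. A minimal sketch of that pattern, reconstructed from the trace rather than copied from the test suite:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep $((timeout * i))   # 0s, 4s, 8s between attempts, as seen in the trace
        else
            break
        fi
    done
    # Replay whatever the last attempt produced, then clean up
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}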
/tmp/tmp.0luGTgTIqQ + rm /tmp/tmp.jHWNXF4fzp /tmp/tmp.0luGTgTIqQ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.MTBoDOtINj ++ mktemp + local LAST_ERR=/tmp/tmp.w84fzGed30 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MTBoDOtINj + cat /tmp/tmp.w84fzGed30 + rm /tmp/tmp.MTBoDOtINj /tmp/tmp.w84fzGed30 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.KZn7mCsa3i ++ mktemp + local LAST_ERR=/tmp/tmp.7dyf4ebWMB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KZn7mCsa3i + cat /tmp/tmp.7dyf4ebWMB + rm /tmp/tmp.KZn7mCsa3i /tmp/tmp.7dyf4ebWMB + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Lal50ePtwZ ++ mktemp + local LAST_ERR=/tmp/tmp.bFadalShqn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Lal50ePtwZ clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.bFadalShqn + rm /tmp/tmp.Lal50ePtwZ /tmp/tmp.bFadalShqn + return 0 + check_crd_for_deletion PR-2219-d7e802db + local git_tag=PR-2219-d7e802db ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2219-d7e802db/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval 
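The per-CRD cleanup above has a deliberate shape: strip metadata.finalizers from every remaining custom resource so nothing can block deletion, then wait for the CRD itself to disappear. A sketch using one CRD name from this run; the trailing `|| :` mirrors the log's tolerance of the "resource type" errors, and `-r` is a GNU xargs flag (an addition here) that skips the run entirely when no instances exist:

crd=perconaservermongodbbackups.psmdb.percona.com   # example from this run
kubectl get "$crd" --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -r -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
    || :
# $0/$1 are the NAMESPACE and NAME columns handed over by xargs
kubectl wait --for=delete crd "$crd"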
'.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h5GzeZtDSJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.yuFQUgBeQA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.h5GzeZtDSJ ++ cat /tmp/tmp.yuFQUgBeQA Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.h5GzeZtDSJ ++ cat /tmp/tmp.yuFQUgBeQA Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.h5GzeZtDSJ ++ cat /tmp/tmp.yuFQUgBeQA Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.h5GzeZtDSJ ++ cat /tmp/tmp.yuFQUgBeQA Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.h5GzeZtDSJ /tmp/tmp.yuFQUgBeQA ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v 
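Each destroy_chaos_mesh step above is the same list-filter-delete move, and the `:` no-op after every "no name was specified" error shows the script tolerating an empty match list. A hypothetical helper capturing that shape (delete_matching is an illustration, not a function in the test suite):

delete_matching() {
    # Delete every object of kind $1 whose name matches $2; when grep finds
    # nothing, kubectl fails with "no name was specified" and "|| :" swallows it.
    local kind=$1 pattern=$2
    timeout 30 kubectl delete "$kind" \
        $(kubectl get "$kind" | grep "$pattern" | awk '{print $1}') || :
}
delete_matching MutatingWebhookConfiguration chaos-mesh
delete_matching ValidatingWebhookConfiguration chaos-mesh
delete_matching crd chaos-mesh.org
delete_matching clusterrolebinding chaos-mesh
delete_matching clusterrole chaos-mesh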
'^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- ++ mktemp cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.8wXRlodo0k ++ mktemp + local LAST_OUT=/tmp/tmp.AKRUUDwyX4 ++ mktemp + local LAST_ERR=/tmp/tmp.P85yEmEWR9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.gc2LzzXDub + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8wXRlodo0k + cat /tmp/tmp.P85yEmEWR9 + rm /tmp/tmp.8wXRlodo0k /tmp/tmp.P85yEmEWR9 + return 0 namespace "one-pod-9888" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AKRUUDwyX4 namespace "psmdb-operator" deleted + cat /tmp/tmp.gc2LzzXDub + rm /tmp/tmp.AKRUUDwyX4 /tmp/tmp.gc2LzzXDub + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3Q8wiS1SHK ++ mktemp + local LAST_ERR=/tmp/tmp.QInhlv5amD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Q8wiS1SHK + cat /tmp/tmp.QInhlv5amD + rm /tmp/tmp.3Q8wiS1SHK /tmp/tmp.QInhlv5amD + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dBSLK04mMK ++ mktemp + local LAST_ERR=/tmp/tmp.3JaIXkHaji + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dBSLK04mMK namespace/psmdb-operator created + cat /tmp/tmp.3JaIXkHaji + rm /tmp/tmp.dBSLK04mMK /tmp/tmp.3JaIXkHaji + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.OQ7N56Gr2S +++ mktemp ++ local LAST_ERR=/tmp/tmp.qWC4TeTW5v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OQ7N56Gr2S ++ cat /tmp/tmp.qWC4TeTW5v ++ rm /tmp/tmp.OQ7N56Gr2S /tmp/tmp.qWC4TeTW5v ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ckKRD5oa9Y ++ mktemp + local LAST_ERR=/tmp/tmp.oG0uBAY9sk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ckKRD5oa9Y Context 
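The namespace churn traced above boils down to three idempotent steps; a minimal sketch:

ns=psmdb-operator
kubectl delete namespace "$ns" --ignore-not-found   # no-op when absent
kubectl wait --for=delete namespace "$ns"           # block until it is fully gone
kubectl create namespace "$ns"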
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster4" modified. + cat /tmp/tmp.oG0uBAY9sk + rm /tmp/tmp.ckKRD5oa9Y /tmp/tmp.oG0uBAY9sk + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.rJ3ghR1Vd7 ++ mktemp + local LAST_ERR=/tmp/tmp.hXYKhCrdda + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rJ3ghR1Vd7 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.hXYKhCrdda + rm /tmp/tmp.rJ3ghR1Vd7 /tmp/tmp.hXYKhCrdda + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.FuMclynDrv ++ mktemp + local LAST_ERR=/tmp/tmp.PnBkkn1QAi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FuMclynDrv clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.PnBkkn1QAi + rm /tmp/tmp.FuMclynDrv /tmp/tmp.PnBkkn1QAi + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.sjstqnuYb2 ++ mktemp + local LAST_ERR=/tmp/tmp.c0H7xIhp4t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sjstqnuYb2 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.c0H7xIhp4t + rm /tmp/tmp.sjstqnuYb2 /tmp/tmp.c0H7xIhp4t + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0BBrXiTsjm +++ mktemp ++ local LAST_ERR=/tmp/tmp.akhVbWFUnG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0BBrXiTsjm ++ cat /tmp/tmp.akhVbWFUnG ++ rm /tmp/tmp.0BBrXiTsjm /tmp/tmp.akhVbWFUnG ++ return 0 + wait_operator_pod percona-server-mongodb-operator-76758dcf9-lpr2p + local pod=percona-server-mongodb-operator-76758dcf9-lpr2p + set +o xtrace waiting for pod/percona-server-mongodb-operator-76758dcf9-lpr2p to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.cnVHaeqPUa +++ mktemp ++ local LAST_ERR=/tmp/tmp.wlMFedB5Ue ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cnVHaeqPUa ++ cat /tmp/tmp.wlMFedB5Ue ++ rm /tmp/tmp.cnVHaeqPUa /tmp/tmp.wlMFedB5Ue ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-76758dcf9-lpr2p ++ mktemp + local LAST_OUT=/tmp/tmp.07e68wteW9 ++ mktemp + local LAST_ERR=/tmp/tmp.Wt7FxefJ6B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-76758dcf9-lpr2p + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.07e68wteW9 + cat /tmp/tmp.Wt7FxefJ6B + rm /tmp/tmp.07e68wteW9 /tmp/tmp.Wt7FxefJ6B + return 0 2026-03-10T22:35:16.307Z INFO setup Manager starting up {"gitCommit": "d7e802db10c9b2b2028f56c7b5227cb276fe5878", "gitBranch": "PR-2219-d7e802db", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace one-pod-6268 + local namespace=one-pod-6268 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 
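wait_operator_pod's body is not expanded in the trace; an equivalent check, assuming a plain `kubectl wait` on the Ready condition, looks like this:

# Find the operator pod by label, wait for Ready, then pull the startup banner
pod=$(kubectl get pods -n psmdb-operator \
    --selector=name=percona-server-mongodb-operator \
    -o 'jsonpath={.items[].metadata.name}')
kubectl wait --for=condition=Ready "pod/$pod" -n psmdb-operator --timeout=120s
kubectl logs -n psmdb-operator "$pod" | grep 'Manager starting up'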
30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces one-pod-6268' + set +o xtrace + xargs kubectl delete ns ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces one-pod-6268 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace one-pod-6268 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ySTGOc1yLi + local LAST_OUT=/tmp/tmp.W5jH6GPTN4 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.oun7ceDpIp + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.aHWuZfBZYC + for i in $(seq 0 2) + local exit_status=0 + local timeout=4 + set +e + kubectl delete namespace one-pod-6268 --ignore-not-found ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W5jH6GPTN4 + cat /tmp/tmp.aHWuZfBZYC + rm /tmp/tmp.W5jH6GPTN4 /tmp/tmp.aHWuZfBZYC + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ySTGOc1yLi + cat /tmp/tmp.oun7ceDpIp + rm /tmp/tmp.ySTGOc1yLi /tmp/tmp.oun7ceDpIp + return 0 + kubectl_bin wait --for=delete namespace one-pod-6268 ++ mktemp + local LAST_OUT=/tmp/tmp.fZbmCoUswH ++ mktemp + local LAST_ERR=/tmp/tmp.quMUe8Kg4Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace one-pod-6268 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fZbmCoUswH + cat /tmp/tmp.quMUe8Kg4Z + rm /tmp/tmp.fZbmCoUswH /tmp/tmp.quMUe8Kg4Z + return 0 + desc 'create namespace one-pod-6268' + set +o xtrace ----------------------------------------------------------------------------------- create namespace one-pod-6268 ----------------------------------------------------------------------------------- + kubectl_bin create namespace one-pod-6268 ++ mktemp + local LAST_OUT=/tmp/tmp.3zyhHNcFQZ ++ mktemp + local LAST_ERR=/tmp/tmp.i3M3ze76Ll + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace one-pod-6268 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3zyhHNcFQZ namespace/one-pod-6268 created + cat /tmp/tmp.i3M3ze76Ll + rm /tmp/tmp.3zyhHNcFQZ /tmp/tmp.i3M3ze76Ll + return 0 + set_kube_ctx one-pod-6268 + local namespace=one-pod-6268 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.v059PTj6AA +++ mktemp ++ local LAST_ERR=/tmp/tmp.fmCUNg68XV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v059PTj6AA ++ cat /tmp/tmp.fmCUNg68XV ++ rm /tmp/tmp.v059PTj6AA /tmp/tmp.fmCUNg68XV ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster4 --namespace=one-pod-6268 ++ mktemp + local LAST_OUT=/tmp/tmp.ScycFBuWsI ++ mktemp + local LAST_ERR=/tmp/tmp.axd766FiNJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster4 --namespace=one-pod-6268 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ScycFBuWsI Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster4" modified. + cat /tmp/tmp.axd766FiNJ + rm /tmp/tmp.ScycFBuWsI /tmp/tmp.axd766FiNJ + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.VVqgWmUfpM ++ mktemp + local LAST_ERR=/tmp/tmp.t4EsJIzL6B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VVqgWmUfpM deployment.apps/psmdb-client created secret/some-users created secret/minio-secret created + cat /tmp/tmp.t4EsJIzL6B + rm /tmp/tmp.VVqgWmUfpM /tmp/tmp.t4EsJIzL6B + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 
'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Tue Mar 10 22:35:57 2026 NAMESPACE: one-pod-6268 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.one-pod-6268.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace one-pod-6268 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace one-pod-6268 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace one-pod-6268 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace one-pod-6268 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
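The `retry 10 60 helm install ...` call above only shows the local max/delay/n assignments before the first attempt; a plausible reconstruction of the rest of the helper:

retry() {
    local max=$1 delay=$2
    shift 2
    local n=1
    # Run the command; on failure sleep $delay and try again, up to $max times
    until "$@"; do
        if [ "$n" -ge "$max" ]; then
            echo "retry: giving up after $n attempts: $*" >&2
            return 1
        fi
        sleep "$delay"
        n=$((n + 1))
    done
}
# Usage as in this run (the remaining --set flags are as listed above):
retry 10 60 helm install minio-service --version 5.4.0 minio/minio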
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fz4cqDf8Qo +++ mktemp ++ local LAST_ERR=/tmp/tmp.RP4aLT28Mt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fz4cqDf8Qo ++ cat /tmp/tmp.RP4aLT28Mt ++ rm /tmp/tmp.fz4cqDf8Qo /tmp/tmp.RP4aLT28Mt ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-vtk5g + wait_pod minio-service-6d5f646cdc-vtk5g + local pod=minio-service-6d5f646cdc-vtk5g + set +o xtrace waiting for pod/minio-service-6d5f646cdc-vtk5g to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.one-pod-6268.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.IT1KZSqE6V ++ mktemp + local LAST_ERR=/tmp/tmp.60thjOVd7P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IT1KZSqE6V make_bucket: operator-testing pod "aws-cli" deleted from one-pod-6268 namespace + cat /tmp/tmp.60thjOVd7P + rm /tmp/tmp.IT1KZSqE6V /tmp/tmp.60thjOVd7P + return 0 + cluster=one-pod-rs0 + spinup_psmdb one-pod-rs0 /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/one-pod-rs0.yml 1 + local cluster=one-pod-rs0 + local config=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/one-pod-rs0.yml + local size=1 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/one-pod-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/one-pod-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/one-pod-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db"' + local LAST_OUT=/tmp/tmp.hEwd273hOi + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + 
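Bucket creation runs from a throwaway in-cluster pod so the MinIO ClusterIP service is reachable; `--rm` removes the pod as soon as the command exits. Consolidated from the trace (the credentials are the test fixtures used in this run):

kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- \
    bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'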
/usr/sbin/sed -e s/NAME_SPACE/one-pod-6268/g ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.LiYhATRHnN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hEwd273hOi perconaservermongodb.psmdb.percona.com/one-pod created + cat /tmp/tmp.LiYhATRHnN + rm /tmp/tmp.hEwd273hOi /tmp/tmp.LiYhATRHnN + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running one-pod-rs0 1 + local name=one-pod-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=one-pod ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eBmT3TLfqh +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gk5fXpC8rT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eBmT3TLfqh ++ cat /tmp/tmp.Gk5fXpC8rT ++ rm /tmp/tmp.eBmT3TLfqh /tmp/tmp.Gk5fXpC8rT ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod one-pod-rs0-0 + local pod=one-pod-rs0-0 + set +o xtrace waiting for pod/one-pod-rs0-0 to be ready.......OK ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4B9i7pKLXN +++ mktemp ++ local LAST_ERR=/tmp/tmp.7300VtnXpX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4B9i7pKLXN ++ cat /tmp/tmp.7300VtnXpX ++ rm /tmp/tmp.4B9i7pKLXN /tmp/tmp.7300VtnXpX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q3cID2h9dO +++ mktemp ++ local LAST_ERR=/tmp/tmp.wbzn4uFvJD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q3cID2h9dO ++ cat /tmp/tmp.wbzn4uFvJD ++ rm /tmp/tmp.Q3cID2h9dO /tmp/tmp.wbzn4uFvJD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... + sleep 20 + compare_kubectl statefulset/one-pod-rs0 + local resource=statefulset/one-pod-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0.yml + local new_result=/tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/one-pod-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
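wait_for_running decides which extra pods to wait for by querying the CR spec with a filtered jsonpath; the three checks traced above condense into one loop (the loop is an illustration, the jsonpath expression is the one from the trace):

for member in arbiter nonvoting hidden; do
    enabled=$(kubectl get psmdb one-pod \
        -o "jsonpath={.spec.replsets[?(@.name==\"rs0\")].${member}.enabled}")
    # Prints only when the member type is enabled; empty or "false" otherwise
    [[ $enabled == "true" ]] && echo "rs0 has $member members enabled"
done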
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-6268", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Vkss7j2ae6 ++ mktemp + local LAST_ERR=/tmp/tmp.4fQCRUYCav + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/one-pod-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Vkss7j2ae6 + cat /tmp/tmp.4fQCRUYCav + rm /tmp/tmp.Vkss7j2ae6 /tmp/tmp.4fQCRUYCav + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0.yml /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + log 'compare_kubectl: statefulset/one-pod-rs0 OK' + set +o xtrace [2026-03-10T22:38:05+0000] compare_kubectl: statefulset/one-pod-rs0 OK + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@one-pod-rs0.one-pod-6268 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@one-pod-rs0.one-pod-6268 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@one-pod-rs0.one-pod-6268 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WpGWUJAh7B +++ mktemp ++ local LAST_ERR=/tmp/tmp.BXwPxQD96s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WpGWUJAh7B ++ cat /tmp/tmp.BXwPxQD96s ++ rm /tmp/tmp.WpGWUJAh7B /tmp/tmp.BXwPxQD96s ++ return 0 + local client_container=psmdb-client-bb8b97679-9jtbd + kubectl_bin exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.MCe6cTcMN8 ++ mktemp + local LAST_ERR=/tmp/tmp.j2qK7a6D2I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MCe6cTcMN8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://one-pod-rs0-0.one-pod-rs0.one-pod-6268.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("dcb7a036-eda6-48c4-a68b-e94b843c55d1") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.j2qK7a6D2I + rm /tmp/tmp.MCe6cTcMN8 /tmp/tmp.j2qK7a6D2I + return 0 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@one-pod-rs0.one-pod-6268 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@one-pod-rs0.one-pod-6268 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@one-pod-rs0.one-pod-6268 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jSR0cDGQyl +++ mktemp ++ local LAST_ERR=/tmp/tmp.WglqTq3QiL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jSR0cDGQyl ++ cat /tmp/tmp.WglqTq3QiL ++ rm /tmp/tmp.jSR0cDGQyl /tmp/tmp.WglqTq3QiL ++ return 0 + local client_container=psmdb-client-bb8b97679-9jtbd + kubectl_bin exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wAGv698ZJs ++ mktemp + local LAST_ERR=/tmp/tmp.pCOyOidp9q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wAGv698ZJs Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://one-pod-rs0-0.one-pod-rs0.one-pod-6268.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2257557f-591c-4e1a-b540-2e2f7a35c5a7") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.pCOyOidp9q + rm /tmp/tmp.wAGv698ZJs /tmp/tmp.pCOyOidp9q + return 0 + wait_cluster_consistency one-pod + local cluster_name=one-pod + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5rOuW9sdGw +++ mktemp ++ local LAST_ERR=/tmp/tmp.OEA0ISavGA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5rOuW9sdGw ++ cat /tmp/tmp.OEA0ISavGA ++ rm /tmp/tmp.5rOuW9sdGw /tmp/tmp.OEA0ISavGA ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if service and pvc created with expected config' + set +o xtrace 
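wait_cluster_consistency polls the CR's .status.state until the operator reports "ready"; a reconstruction of the loop implied by the trace (retry=0, sleep 7, jsonpath poll, wait_time=32):

wait_cluster_consistency() {
    local cluster_name=$1 wait_time=${2:-32} retry=0
    echo -n 'waiting for cluster readyness'
    sleep 7   # let the operator react before the first poll
    until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
        retry=$((retry + 1))
        if [ "$retry" -ge "$wait_time" ]; then
            echo ' cluster did not become ready' >&2
            return 1
        fi
        echo -n .
        sleep 7
    done
    echo .OK
}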
----------------------------------------------------------------------------------- check if service and pvc created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/one-pod-rs0 + local resource=service/one-pod-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/service_one-pod-rs0.yml + local new_result=/tmp/tmp.NevJPw982B/service_one-pod-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/service_one-pod-rs0-oc.yml ']' + kubectl_bin get -o yaml service/one-pod-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-6268", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1BpVDxv26r ++ mktemp + local LAST_ERR=/tmp/tmp.FiE2kL6Buy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/one-pod-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1BpVDxv26r + cat /tmp/tmp.FiE2kL6Buy + rm /tmp/tmp.1BpVDxv26r /tmp/tmp.FiE2kL6Buy + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NevJPw982B/service_one-pod-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NevJPw982B/service_one-pod-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NevJPw982B/service_one-pod-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/service_one-pod-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/service_one-pod-rs0.yml /tmp/tmp.NevJPw982B/service_one-pod-rs0.yml + log 'compare_kubectl: service/one-pod-rs0 OK' + set +o xtrace [2026-03-10T22:38:19+0000] compare_kubectl: service/one-pod-rs0 OK + compare_kubectl pvc/mongod-data-one-pod-rs0-0 + local resource=pvc/mongod-data-one-pod-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0.yml + local new_result=/tmp/tmp.NevJPw982B/pvc_mongod-data-one-pod-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0-oc.yml ']' + kubectl_bin get -o yaml pvc/mongod-data-one-pod-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-6268", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.puLOEnjfo2 ++ mktemp + local LAST_ERR=/tmp/tmp.xoRvcOU0Lj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml pvc/mongod-data-one-pod-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.puLOEnjfo2 + cat /tmp/tmp.xoRvcOU0Lj + rm /tmp/tmp.puLOEnjfo2 /tmp/tmp.xoRvcOU0Lj + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NevJPw982B/pvc_mongod-data-one-pod-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NevJPw982B/pvc_mongod-data-one-pod-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NevJPw982B/pvc_mongod-data-one-pod-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0.yml /tmp/tmp.NevJPw982B/pvc_mongod-data-one-pod-rs0-0.yml + log 'compare_kubectl: pvc/mongod-data-one-pod-rs0-0 OK' + set +o xtrace [2026-03-10T22:38:20+0000] compare_kubectl: pvc/mongod-data-one-pod-rs0-0 OK + desc 'check system log' + set +o xtrace ----------------------------------------------------------------------------------- check system log ----------------------------------------------------------------------------------- + run_mongo 'db.serverCmdLineOpts()' clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268 + local 'command=db.serverCmdLineOpts()' + local uri=clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' + /usr/sbin/sed -re 's/((Timestamp|BinData|NumberLong)\((.+?\)))/{}/g' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + jq .parsed.systemLog +++ mktemp ++ local LAST_OUT=/tmp/tmp.YlMNWD042b +++ mktemp ++ local LAST_ERR=/tmp/tmp.rppHlwFnp6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YlMNWD042b ++ cat /tmp/tmp.rppHlwFnp6 ++ rm /tmp/tmp.YlMNWD042b /tmp/tmp.rppHlwFnp6 ++ return 0 + local client_container=psmdb-client-bb8b97679-9jtbd + kubectl_bin exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NyWkwviSJH ++ mktemp + local 
LAST_ERR=/tmp/tmp.fkL1tRpEpY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NyWkwviSJH + cat /tmp/tmp.fkL1tRpEpY + rm /tmp/tmp.NyWkwviSJH /tmp/tmp.fkL1tRpEpY + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/serverCmdLineOpts_parsed_systemLog.json /tmp/tmp.NevJPw982B/parsed_systemLog.json + desc 'create secret and check custom config' + set +o xtrace ----------------------------------------------------------------------------------- create secret and check custom config ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/mongod-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hRHxcKrVrC ++ mktemp + local LAST_ERR=/tmp/tmp.cgVKkeXbpC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/conf/mongod-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hRHxcKrVrC secret/one-pod-rs0-mongod created + cat /tmp/tmp.cgVKkeXbpC + rm /tmp/tmp.hRHxcKrVrC /tmp/tmp.cgVKkeXbpC + return 0 + wait_cluster_consistency one-pod + local cluster_name=one-pod + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.paEh26cr6N +++ mktemp ++ local LAST_ERR=/tmp/tmp.XbDGHnkmLC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.paEh26cr6N ++ cat /tmp/tmp.XbDGHnkmLC ++ rm /tmp/tmp.paEh26cr6N /tmp/tmp.XbDGHnkmLC ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/one-pod-rs0 -secret + local resource=statefulset/one-pod-rs0 + local postfix=-secret + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret.yml + local new_result=/tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret-oc.yml ']' + kubectl_bin get -o yaml statefulset/one-pod-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | ++ mktemp del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-6268", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.54Zxzq6Oi6 ++ mktemp + local LAST_ERR=/tmp/tmp.HXejNRZHsx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/one-pod-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.54Zxzq6Oi6 + cat /tmp/tmp.HXejNRZHsx + rm /tmp/tmp.54Zxzq6Oi6 /tmp/tmp.HXejNRZHsx + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret.yml /tmp/tmp.NevJPw982B/statefulset_one-pod-rs0.yml + log 'compare_kubectl: statefulset/one-pod-rs0 OK' + set +o xtrace [2026-03-10T22:38:34+0000] compare_kubectl: statefulset/one-pod-rs0 OK + run_mongo 'db.serverCmdLineOpts()' clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268 + local 'command=db.serverCmdLineOpts()' + local uri=clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/((Timestamp|BinData|NumberLong)\((.+?\)))/{}/g' + jq .parsed.systemLog +++ mktemp ++ local LAST_OUT=/tmp/tmp.TXCKND4JQh +++ mktemp ++ local LAST_ERR=/tmp/tmp.cl2NfcdoR7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TXCKND4JQh ++ cat /tmp/tmp.cl2NfcdoR7 ++ rm /tmp/tmp.TXCKND4JQh /tmp/tmp.cl2NfcdoR7 ++ return 0 + local client_container=psmdb-client-bb8b97679-9jtbd + kubectl_bin exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hXTjQylzbe ++ mktemp + local LAST_ERR=/tmp/tmp.Hpo3ZLNw2r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.hXTjQylzbe + cat /tmp/tmp.Hpo3ZLNw2r command terminated with exit code 1 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo 
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=2
+ set -e
+ '[' 2 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.hXTjQylzbe
+ cat /tmp/tmp.Hpo3ZLNw2r
DNSHostNotFound: Failed to look up service "_mongodb._tcp.one-pod-rs0.one-pod-6268.svc.cluster.local": Success
try 'mongo --help' for more information
command terminated with exit code 2
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-bb8b97679-9jtbd -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-6268.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=2
+ set -e
+ '[' 2 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.hXTjQylzbe
+ cat /tmp/tmp.Hpo3ZLNw2r
DNSHostNotFound: Failed to look up service "_mongodb._tcp.one-pod-rs0.one-pod-6268.svc.cluster.local": Success
try 'mongo --help' for more information
command terminated with exit code 2
+ sleep 8
+ cat /tmp/tmp.hXTjQylzbe
+ cat /tmp/tmp.Hpo3ZLNw2r
DNSHostNotFound: Failed to look up service "_mongodb._tcp.one-pod-rs0.one-pod-6268.svc.cluster.local": Success
try 'mongo --help' for more information
command terminated with exit code 2
+ rm /tmp/tmp.hXTjQylzbe /tmp/tmp.Hpo3ZLNw2r
+ return 2
jq: parse error: Invalid numeric literal at line 2, column 6
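
The trailing jq parse error is downstream fallout from the failed connection rather than a separate bug: run_mongo pipes whatever the mongo shell prints into jq .parsed.systemLog, and after the SRV lookup for _mongodb._tcp.one-pod-rs0.one-pod-6268.svc.cluster.local failed three times, that input is the DNSHostNotFound error text instead of JSON. A minimal reproduction of the failure mode (hypothetical input, real jq behavior; the exact error message and position vary with the input):

# Feeding non-JSON shell output into jq produces the same class of parse
# error seen above, since jq stops at the first token it cannot read as JSON.
printf 'DNSHostNotFound: lookup failed\ntry 1 more time\n' | jq .parsed.systemLog
# jq: parse error (message and line/column depend on the input)
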