++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/logs/one-pod.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/logs/one-pod.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ '[' v1.31.14-gke.1114000 ']' ++ GKE=1 +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ kubectl version -o json +++ /usr/sbin/sed -r 's/[^0-9.]+//g' Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.31 + main + create_infra one-pod-18702 + local ns=one-pod-18702 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.vNPf5cOHTp ++ mktemp + local LAST_ERR=/tmp/tmp.v2SNE8vWDe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vNPf5cOHTp customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.v2SNE8vWDe + rm /tmp/tmp.vNPf5cOHTp /tmp/tmp.v2SNE8vWDe + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.EWGDLyg9B4 ++ mktemp + local LAST_ERR=/tmp/tmp.BliO7aSb7T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
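Annotation: the block above is the harness's delete_crd step. For every CRD named in deploy/crd.yaml it force-clears finalizers on any leftover custom resources (so deletion cannot hang on a stuck finalizer) and then waits for the CRD itself to disappear. A minimal standalone sketch of that pattern, assuming yq v4 and GNU xargs; anchoring the document-separator match as '^---$' also avoids the "grep: warning: stray \ before -" messages seen in this run:

# Sketch of the delete_crd teardown traced above (assumptions: yq v4,
# GNU xargs, CRD manifests in this repo's deploy/crd.yaml).
for crd_name in $(yq eval '.metadata.name' deploy/crd.yaml | grep -v '^---$'); do
    # Clear finalizers on surviving custom resources so deletion cannot hang.
    # `xargs -r` skips the patch when nothing is left, instead of tolerating
    # "the server doesn't have a resource type" as the run above does.
    kubectl get "$crd_name" --all-namespaces -o wide 2>/dev/null \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || true
    kubectl wait --for=delete "crd/$crd_name" --timeout=60s || true
done
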
cat /tmp/tmp.EWGDLyg9B4 + cat /tmp/tmp.BliO7aSb7T + rm /tmp/tmp.EWGDLyg9B4 /tmp/tmp.BliO7aSb7T + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4RvuGXMX35 ++ mktemp + local LAST_ERR=/tmp/tmp.HRGKhqZsNg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4RvuGXMX35 + cat /tmp/tmp.HRGKhqZsNg + rm /tmp/tmp.4RvuGXMX35 /tmp/tmp.HRGKhqZsNg + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.kMIb5KNRRk ++ mktemp + local LAST_ERR=/tmp/tmp.NLFaUV1imZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kMIb5KNRRk + cat /tmp/tmp.NLFaUV1imZ + rm /tmp/tmp.kMIb5KNRRk /tmp/tmp.NLFaUV1imZ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.n6fnvfnpFa ++ mktemp + local LAST_ERR=/tmp/tmp.v2eVuw1ugQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n6fnvfnpFa clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.v2eVuw1ugQ + rm /tmp/tmp.n6fnvfnpFa /tmp/tmp.v2eVuw1ugQ + return 0 + check_crd_for_deletion PR-2163-64e6d5ce + local git_tag=PR-2163-64e6d5ce ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2163-64e6d5ce/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s 
https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8wG7aPf3iy +++ mktemp ++ local LAST_ERR=/tmp/tmp.E97qGJnd2f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.8wG7aPf3iy ++ cat /tmp/tmp.E97qGJnd2f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.8wG7aPf3iy ++ cat /tmp/tmp.E97qGJnd2f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.8wG7aPf3iy ++ cat /tmp/tmp.E97qGJnd2f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.8wG7aPf3iy ++ cat /tmp/tmp.E97qGJnd2f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.8wG7aPf3iy /tmp/tmp.E97qGJnd2f ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.E1Hamb6oVt ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.f4BATey6Qz ++ mktemp + local LAST_ERR=/tmp/tmp.2eylxLzmSg + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.CS1AJTNhv0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E1Hamb6oVt + cat /tmp/tmp.2eylxLzmSg + rm /tmp/tmp.E1Hamb6oVt /tmp/tmp.2eylxLzmSg + return 0 namespace "one-pod-5914" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f4BATey6Qz namespace "psmdb-operator" deleted + cat /tmp/tmp.CS1AJTNhv0 + rm /tmp/tmp.f4BATey6Qz /tmp/tmp.CS1AJTNhv0 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ftxztHVC8Y ++ mktemp + local LAST_ERR=/tmp/tmp.UpRJcZZxtm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ftxztHVC8Y + cat /tmp/tmp.UpRJcZZxtm + rm /tmp/tmp.ftxztHVC8Y /tmp/tmp.UpRJcZZxtm + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3ebbWIfnr6 ++ mktemp + local LAST_ERR=/tmp/tmp.l3tX4ro73k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ebbWIfnr6 namespace/psmdb-operator created + cat /tmp/tmp.l3tX4ro73k + rm /tmp/tmp.3ebbWIfnr6 /tmp/tmp.l3tX4ro73k + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WVLF3sNeHt +++ mktemp ++ local LAST_ERR=/tmp/tmp.R11jZg8H9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WVLF3sNeHt ++ cat /tmp/tmp.R11jZg8H9u ++ rm /tmp/tmp.WVLF3sNeHt /tmp/tmp.R11jZg8H9u ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2163-64e6d5ce-9-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aAXJ8PO7pM ++ mktemp + local LAST_ERR=/tmp/tmp.btOWsLYIlx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2163-64e6d5ce-9-cluster6 
--namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aAXJ8PO7pM Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2163-64e6d5ce-9-cluster6" modified. + cat /tmp/tmp.btOWsLYIlx + rm /tmp/tmp.aAXJ8PO7pM /tmp/tmp.btOWsLYIlx + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2163-64e6d5ce' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2163-64e6d5ce ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gzE9ocOG8w ++ mktemp + local LAST_ERR=/tmp/tmp.nsc728Tp5e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gzE9ocOG8w customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.nsc728Tp5e + rm /tmp/tmp.gzE9ocOG8w /tmp/tmp.nsc728Tp5e + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.juT5yleotL ++ mktemp + local LAST_ERR=/tmp/tmp.DzfdExYfoJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.juT5yleotL clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.DzfdExYfoJ + rm /tmp/tmp.juT5yleotL /tmp/tmp.DzfdExYfoJ + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2163-64e6d5ce") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lhY8RhOojo ++ mktemp + local LAST_ERR=/tmp/tmp.TAPDtKzJVR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lhY8RhOojo deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.TAPDtKzJVR + rm /tmp/tmp.lhY8RhOojo /tmp/tmp.TAPDtKzJVR + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.gvq7mc3PxN +++ mktemp ++ local LAST_ERR=/tmp/tmp.93RD6xFi56 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gvq7mc3PxN ++ cat /tmp/tmp.93RD6xFi56 ++ rm /tmp/tmp.gvq7mc3PxN /tmp/tmp.93RD6xFi56 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-58f68696bf-4xr6v + local pod=percona-server-mongodb-operator-58f68696bf-4xr6v + set +o xtrace waiting for pod/percona-server-mongodb-operator-58f68696bf-4xr6v to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.4aoIJuxaYf +++ mktemp ++ local LAST_ERR=/tmp/tmp.GDkLytN40k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4aoIJuxaYf ++ cat /tmp/tmp.GDkLytN40k ++ rm /tmp/tmp.4aoIJuxaYf /tmp/tmp.GDkLytN40k ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-58f68696bf-4xr6v ++ mktemp + local LAST_OUT=/tmp/tmp.fUsOozo9Te ++ mktemp + local LAST_ERR=/tmp/tmp.3fIU9Nj5cQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-58f68696bf-4xr6v + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fUsOozo9Te + cat /tmp/tmp.3fIU9Nj5cQ + rm /tmp/tmp.fUsOozo9Te /tmp/tmp.3fIU9Nj5cQ + return 0 2025-12-24T10:56:35.401Z INFO setup Manager starting up {"gitCommit": "64e6d5ce793e3e7e64e1e3aa064fcc83fbdef614", "gitBranch": "PR-2163-64e6d5ce", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace one-pod-18702 + local namespace=one-pod-18702 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + 
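Annotation: each "error: resource(s) were provided, but no name was specified" below comes from destroy_chaos_mesh passing an empty grep result to kubectl delete and papering over the failure with `:`. A sketch of the same sweep that only deletes when something actually matched (hypothetical function name; the run above additionally sweeps a validate-auth webhook, omitted here for brevity):

# Hypothetical variant of the chaos-mesh sweep traced here: collect the
# matching names first and call delete only when the list is non-empty.
destroy_chaos_mesh_sketch() {
    local kind names
    for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration \
                crd clusterrolebinding clusterrole; do
        names=$(kubectl get "$kind" 2>/dev/null | grep chaos-mesh | awk '{print $1}')
        # Skipping empty lists avoids the "no name was specified" errors;
        # $names is deliberately unquoted so each name becomes one argument.
        [ -n "$names" ] && timeout 30 kubectl delete "$kind" $names || :
    done
}
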
timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces one-pod-18702' + xargs kubectl delete ns ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces one-pod-18702 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace one-pod-18702 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.MbXMSCfHHS egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_ERR=/tmp/tmp.RIBeXGpe1r + local exit_status=0 + local timeout=4 + local LAST_OUT=/tmp/tmp.2g7GII10EF ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.C7maMJnT3V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace one-pod-18702 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MbXMSCfHHS + cat /tmp/tmp.RIBeXGpe1r + rm /tmp/tmp.MbXMSCfHHS /tmp/tmp.RIBeXGpe1r + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2g7GII10EF + cat /tmp/tmp.C7maMJnT3V + rm /tmp/tmp.2g7GII10EF /tmp/tmp.C7maMJnT3V + return 0 + kubectl_bin wait --for=delete namespace one-pod-18702 ++ mktemp + local LAST_OUT=/tmp/tmp.sT2hJi1fAp ++ mktemp + local LAST_ERR=/tmp/tmp.SxKSuYO88J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace one-pod-18702 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sT2hJi1fAp + cat /tmp/tmp.SxKSuYO88J + rm /tmp/tmp.sT2hJi1fAp /tmp/tmp.SxKSuYO88J + return 0 + desc 'create namespace one-pod-18702' + set +o xtrace ----------------------------------------------------------------------------------- create namespace one-pod-18702 ----------------------------------------------------------------------------------- + kubectl_bin create namespace one-pod-18702 ++ mktemp + local LAST_OUT=/tmp/tmp.OELmIaWQte ++ mktemp 
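Annotation: nearly every command in this log runs through the harness's kubectl_bin wrapper: stdout and stderr are captured into mktemp files (the /tmp/tmp.* pairs above), the call is retried up to three times with 0/4/8-second sleeps on failure, and both capture files are printed and removed at the end. A minimal reconstruction from the trace; the real helper's exact signature and policy may differ:

# Sketch of the kubectl_bin retry wrapper as reconstructed from this log:
# capture output, retry up to 3 times, back off 0s/4s/8s (timeout=4).
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ "$exit_status" -eq 0 ] && break
        # On failure, surface what happened so far, then back off and retry.
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
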
+ local LAST_ERR=/tmp/tmp.8stykU00PC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace one-pod-18702 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OELmIaWQte namespace/one-pod-18702 created + cat /tmp/tmp.8stykU00PC + rm /tmp/tmp.OELmIaWQte /tmp/tmp.8stykU00PC + return 0 + set_kube_ctx one-pod-18702 + local namespace=one-pod-18702 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.rTLcBcqCpt +++ mktemp ++ local LAST_ERR=/tmp/tmp.ku6nzzBNpY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rTLcBcqCpt ++ cat /tmp/tmp.ku6nzzBNpY ++ rm /tmp/tmp.rTLcBcqCpt /tmp/tmp.ku6nzzBNpY ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2163-64e6d5ce-9-cluster6 --namespace=one-pod-18702 ++ mktemp + local LAST_OUT=/tmp/tmp.X0nhtKvK4j ++ mktemp + local LAST_ERR=/tmp/tmp.M2bSFLPCoM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2163-64e6d5ce-9-cluster6 --namespace=one-pod-18702 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X0nhtKvK4j Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2163-64e6d5ce-9-cluster6" modified. + cat /tmp/tmp.M2bSFLPCoM + rm /tmp/tmp.X0nhtKvK4j /tmp/tmp.M2bSFLPCoM + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.3xzpEWOkjp ++ mktemp + local LAST_ERR=/tmp/tmp.FqnPtheiuC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3xzpEWOkjp deployment.apps/psmdb-client created secret/some-users created secret/minio-secret created + cat /tmp/tmp.FqnPtheiuC + rm /tmp/tmp.3xzpEWOkjp /tmp/tmp.FqnPtheiuC + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' 
--set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Wed Dec 24 10:57:16 2025 NAMESPACE: one-pod-18702 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.one-pod-18702.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace one-pod-18702 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace one-pod-18702 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace one-pod-18702 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace one-pod-18702 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.99A1CC93OK +++ mktemp ++ local LAST_ERR=/tmp/tmp.lZ5oXjKns4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.99A1CC93OK ++ cat /tmp/tmp.lZ5oXjKns4 ++ rm /tmp/tmp.99A1CC93OK /tmp/tmp.lZ5oXjKns4 ++ return 0 + MINIO_POD=minio-service-d9589b474-g8h67 + wait_pod minio-service-d9589b474-g8h67 + local pod=minio-service-d9589b474-g8h67 + set +o xtrace waiting for pod/minio-service-d9589b474-g8h67 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.one-pod-18702.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.aHsQe6uvhH ++ mktemp + local LAST_ERR=/tmp/tmp.po2XF7M101 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.one-pod-18702.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aHsQe6uvhH service/minio-service created + cat /tmp/tmp.po2XF7M101 + rm /tmp/tmp.aHsQe6uvhH /tmp/tmp.po2XF7M101 + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.Yd14fcAFxG ++ mktemp + local LAST_ERR=/tmp/tmp.BT01irBtzf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yd14fcAFxG pod "aws-cli" deleted from one-pod-18702 namespace + cat /tmp/tmp.BT01irBtzf All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
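Annotation: the backup bucket is created from inside the cluster with a throwaway aws-cli pod pointed at the MinIO service, as traced here (the `pod "aws-cli" deleted` line is kubectl run --rm cleaning up after itself). The same step as a sketch, using the run's throwaway test credentials and in-cluster endpoint:

# Create an S3 bucket on the in-cluster MinIO endpoint via a transient pod.
# Credentials and endpoint are the test-only values from this run.
create_minio_bucket_sketch() {
    local bucket=${1:-operator-testing}
    kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
        bash -c "AWS_ACCESS_KEY_ID=some-access-key \
                 AWS_SECRET_ACCESS_KEY=some-secret-key \
                 AWS_DEFAULT_REGION=us-east-1 \
                 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://$bucket"
}
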
+ rm /tmp/tmp.Yd14fcAFxG /tmp/tmp.BT01irBtzf + return 0 + cluster=one-pod-rs0 + spinup_psmdb one-pod-rs0 /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/one-pod-rs0.yml 1 + local cluster=one-pod-rs0 + local config=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/one-pod-rs0.yml + local size=1 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/one-pod-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/one-pod-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/one-pod-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2163-64e6d5ce"' + local LAST_OUT=/tmp/tmp.zlUmDFuOqj + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + /usr/sbin/sed -e s/NAME_SPACE/one-pod-18702/g + local LAST_ERR=/tmp/tmp.jt2cvq8YAm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zlUmDFuOqj perconaservermongodb.psmdb.percona.com/one-pod created + cat /tmp/tmp.jt2cvq8YAm + rm /tmp/tmp.zlUmDFuOqj /tmp/tmp.jt2cvq8YAm + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running one-pod-rs0 1 + local name=one-pod-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=one-pod ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JDlJqYBH5a +++ mktemp ++ local LAST_ERR=/tmp/tmp.WoC6RnbQJJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JDlJqYBH5a ++ cat /tmp/tmp.WoC6RnbQJJ ++ rm /tmp/tmp.JDlJqYBH5a /tmp/tmp.WoC6RnbQJJ ++ return 0 + [[ false == \t\r\u\e ]] + wait_pod one-pod-rs0-0 + local pod=one-pod-rs0-0 + set +o xtrace waiting for pod/one-pod-rs0-0 to be ready.........OK ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SpfCWm7aPW +++ mktemp ++ local LAST_ERR=/tmp/tmp.52SJnTZO8T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.SpfCWm7aPW ++ cat /tmp/tmp.52SJnTZO8T ++ rm /tmp/tmp.SpfCWm7aPW /tmp/tmp.52SJnTZO8T ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SyKFUWI7lL +++ mktemp ++ local LAST_ERR=/tmp/tmp.0S0HBi16LR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SyKFUWI7lL ++ cat /tmp/tmp.0S0HBi16LR ++ rm /tmp/tmp.SyKFUWI7lL /tmp/tmp.0S0HBi16LR ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................. + sleep 20 + compare_kubectl statefulset/one-pod-rs0 + local resource=statefulset/one-pod-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0.yml + local new_result=/tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/one-pod-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-18702", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.j70wDq73w0 ++ mktemp + local LAST_ERR=/tmp/tmp.Z5Mhujn510 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/one-pod-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j70wDq73w0 + cat /tmp/tmp.Z5Mhujn510 + rm /tmp/tmp.j70wDq73w0 /tmp/tmp.Z5Mhujn510 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0.yml /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + log 'compare_kubectl: statefulset/one-pod-rs0 OK' + set +o xtrace [2025-12-24T10:59:26+0000] compare_kubectl: statefulset/one-pod-rs0 OK + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@one-pod-rs0.one-pod-18702 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@one-pod-rs0.one-pod-18702 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@one-pod-rs0.one-pod-18702 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OWAHPjUg8K +++ mktemp ++ local LAST_ERR=/tmp/tmp.zdoFleku5H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OWAHPjUg8K ++ cat /tmp/tmp.zdoFleku5H ++ rm /tmp/tmp.OWAHPjUg8K /tmp/tmp.zdoFleku5H ++ return 0 + local client_container=psmdb-client-696897d69b-7mwqw + kubectl_bin exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9PxYlkXZDQ ++ mktemp + local LAST_ERR=/tmp/tmp.s386KK4fst + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9PxYlkXZDQ Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://one-pod-rs0-0.one-pod-rs0.one-pod-18702.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("91441f1b-bca1-4fce-bf77-82968c722213") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.s386KK4fst + rm /tmp/tmp.9PxYlkXZDQ /tmp/tmp.s386KK4fst + return 0 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@one-pod-rs0.one-pod-18702 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@one-pod-rs0.one-pod-18702 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@one-pod-rs0.one-pod-18702 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qrZrUqgfwa +++ mktemp ++ local LAST_ERR=/tmp/tmp.IuWQioMbvg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qrZrUqgfwa ++ cat /tmp/tmp.IuWQioMbvg ++ rm /tmp/tmp.qrZrUqgfwa /tmp/tmp.IuWQioMbvg ++ return 0 + local client_container=psmdb-client-696897d69b-7mwqw + kubectl_bin exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bnbMpYxPKs ++ mktemp + local LAST_ERR=/tmp/tmp.Sk8Y2Uth4w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bnbMpYxPKs Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://one-pod-rs0-0.one-pod-rs0.one-pod-18702.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9a5a8af0-73d8-4ead-bf87-0f339c0ad06b") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Sk8Y2Uth4w + rm /tmp/tmp.bnbMpYxPKs /tmp/tmp.Sk8Y2Uth4w + return 0 + wait_cluster_consistency one-pod + local cluster_name=one-pod + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ebbNNxl2iQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.fz2LlYwZFW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ebbNNxl2iQ ++ cat /tmp/tmp.fz2LlYwZFW ++ rm /tmp/tmp.ebbNNxl2iQ /tmp/tmp.fz2LlYwZFW ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if service and pvc created with expected config' + set +o xtrace 
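Annotation: the createUser and insert calls above go through run_mongo: look up the psmdb-client pod, then pipe a printf-built script into the mongo shell over a mongodb+srv URI. A condensed sketch of that helper (the real one also handles cfg replica sets and extra flags; commands containing single quotes need the '\'' escaping dance visible in the trace):

# Condensed run_mongo: execute a mongo-shell snippet inside the
# psmdb-client pod. The URI shape mirrors the trace above.
run_mongo_sketch() {
    local command=$1 uri=$2 replica_set=${3:-rs0}
    local pod
    pod=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    # $command is used as printf's format string, so embedded \n sequences
    # become real newlines before the script reaches the mongo shell.
    kubectl exec "$pod" -- bash -c \
        "printf '${command}\n' | mongo \"mongodb+srv://${uri}.svc.cluster.local/admin?ssl=false&replicaSet=${replica_set}\""
}

# e.g.: run_mongo_sketch 'use myApp\n db.test.insert({ x: 100500 })' \
#           myApp:myPass@one-pod-rs0.one-pod-18702
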
----------------------------------------------------------------------------------- check if service and pvc created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/one-pod-rs0 + local resource=service/one-pod-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/service_one-pod-rs0.yml + local new_result=/tmp/tmp.Cl7M6QWuAu/service_one-pod-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/service_one-pod-rs0-oc.yml ']' + kubectl_bin get -o yaml service/one-pod-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-18702", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.xu6Eit54ms ++ mktemp + local LAST_ERR=/tmp/tmp.oYL2gBp5hu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/one-pod-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xu6Eit54ms + cat /tmp/tmp.oYL2gBp5hu + rm /tmp/tmp.xu6Eit54ms /tmp/tmp.oYL2gBp5hu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Cl7M6QWuAu/service_one-pod-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Cl7M6QWuAu/service_one-pod-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Cl7M6QWuAu/service_one-pod-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/service_one-pod-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/service_one-pod-rs0.yml /tmp/tmp.Cl7M6QWuAu/service_one-pod-rs0.yml + log 'compare_kubectl: service/one-pod-rs0 OK' + set +o xtrace [2025-12-24T10:59:39+0000] compare_kubectl: service/one-pod-rs0 OK + compare_kubectl pvc/mongod-data-one-pod-rs0-0 + local resource=pvc/mongod-data-one-pod-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0.yml + local new_result=/tmp/tmp.Cl7M6QWuAu/pvc_mongod-data-one-pod-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0-oc.yml ']' + kubectl_bin get -o yaml pvc/mongod-data-one-pod-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-18702", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ehHRpaoY0t ++ mktemp + local LAST_ERR=/tmp/tmp.jyXFtjmAC4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml pvc/mongod-data-one-pod-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ehHRpaoY0t + cat /tmp/tmp.jyXFtjmAC4 + rm /tmp/tmp.ehHRpaoY0t /tmp/tmp.jyXFtjmAC4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Cl7M6QWuAu/pvc_mongod-data-one-pod-rs0-0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Cl7M6QWuAu/pvc_mongod-data-one-pod-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Cl7M6QWuAu/pvc_mongod-data-one-pod-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/pvc_mongod-data-one-pod-rs0-0.yml /tmp/tmp.Cl7M6QWuAu/pvc_mongod-data-one-pod-rs0-0.yml + log 'compare_kubectl: pvc/mongod-data-one-pod-rs0-0 OK' + set +o xtrace [2025-12-24T10:59:39+0000] compare_kubectl: pvc/mongod-data-one-pod-rs0-0 OK + desc 'check system log' + set +o xtrace ----------------------------------------------------------------------------------- check system log ----------------------------------------------------------------------------------- + run_mongo 'db.serverCmdLineOpts()' clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' + local 'command=db.serverCmdLineOpts()' + local uri=clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 + local driver=mongodb+srv + /usr/sbin/sed -re 's/((Timestamp|BinData|NumberLong)\((.+?\)))/{}/g' + jq .parsed.systemLog + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.AMwcEaAmEH +++ mktemp ++ local LAST_ERR=/tmp/tmp.9EkqLH0zA3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AMwcEaAmEH ++ cat /tmp/tmp.9EkqLH0zA3 ++ rm /tmp/tmp.AMwcEaAmEH /tmp/tmp.9EkqLH0zA3 ++ return 0 + local client_container=psmdb-client-696897d69b-7mwqw + kubectl_bin exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.Ml9GQXh4QC ++ mktemp + local LAST_ERR=/tmp/tmp.evZ1P4KIbH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ml9GQXh4QC + cat /tmp/tmp.evZ1P4KIbH + rm /tmp/tmp.Ml9GQXh4QC /tmp/tmp.evZ1P4KIbH + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/serverCmdLineOpts_parsed_systemLog.json /tmp/tmp.Cl7M6QWuAu/parsed_systemLog.json + desc 'create secret and check custom config' + set +o xtrace ----------------------------------------------------------------------------------- create secret and check custom config ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/mongod-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.b9Olcgh3XU ++ mktemp + local LAST_ERR=/tmp/tmp.QI4SkjUG0l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/mongod-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b9Olcgh3XU secret/one-pod-rs0-mongod created + cat /tmp/tmp.QI4SkjUG0l + rm /tmp/tmp.b9Olcgh3XU /tmp/tmp.QI4SkjUG0l + return 0 + wait_cluster_consistency one-pod + local cluster_name=one-pod + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.byIjTbVx5F +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z2kJsZdNmu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.byIjTbVx5F ++ cat /tmp/tmp.Z2kJsZdNmu ++ rm /tmp/tmp.byIjTbVx5F /tmp/tmp.Z2kJsZdNmu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KmG4kjrYYv +++ mktemp ++ local LAST_ERR=/tmp/tmp.lstyc8YG4G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KmG4kjrYYv ++ cat /tmp/tmp.lstyc8YG4G ++ rm /tmp/tmp.KmG4kjrYYv /tmp/tmp.lstyc8YG4G ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3w0DcF0HVg +++ mktemp ++ local LAST_ERR=/tmp/tmp.3LsZG2Iq9D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3w0DcF0HVg ++ cat /tmp/tmp.3LsZG2Iq9D ++ rm /tmp/tmp.3w0DcF0HVg /tmp/tmp.3LsZG2Iq9D ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . 
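Annotation: the growing trail of dots around "waiting for cluster readyness" is wait_cluster_consistency polling .status.state of the psmdb object every 10 seconds and giving up after 32 attempts; in this run the state stays "initializing" for several rounds before flipping to "ready". A sketch of that loop as reconstructed from the trace:

# Poll the PerconaServerMongoDB status until the operator reports "ready",
# mirroring the wait_cluster_consistency loop traced here (32 x 10s cap).
wait_cluster_consistency_sketch() {
    local cluster_name=$1 wait_time=${2:-32} retry=0 state
    echo -n 'waiting for cluster readyness'
    sleep 7
    until state=$(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') \
          && [ "$state" = "ready" ]; do
        retry=$((retry + 1))
        if [ "$retry" -ge "$wait_time" ]; then
            echo "cluster $cluster_name did not become ready" >&2
            return 1
        fi
        echo -n .
        sleep 10
    done
    echo .OK
}
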
.+ sleep 10 ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GpxCkbOb6d +++ mktemp ++ local LAST_ERR=/tmp/tmp.XuL3c0CocL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GpxCkbOb6d ++ cat /tmp/tmp.XuL3c0CocL ++ rm /tmp/tmp.GpxCkbOb6d /tmp/tmp.XuL3c0CocL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3Erx5DfRNH +++ mktemp ++ local LAST_ERR=/tmp/tmp.0d38N5BRfe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3Erx5DfRNH ++ cat /tmp/tmp.0d38N5BRfe ++ rm /tmp/tmp.3Erx5DfRNH /tmp/tmp.0d38N5BRfe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tY8YqngmaN +++ mktemp ++ local LAST_ERR=/tmp/tmp.cQvxck5oHB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tY8YqngmaN ++ cat /tmp/tmp.cQvxck5oHB ++ rm /tmp/tmp.tY8YqngmaN /tmp/tmp.cQvxck5oHB ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/one-pod-rs0 -secret + local resource=statefulset/one-pod-rs0 + local postfix=-secret + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret.yml + local new_result=/tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret-oc.yml ']' + kubectl_bin get -o yaml statefulset/one-pod-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("one-pod-18702", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.F6mA0VbtAJ ++ mktemp + local LAST_ERR=/tmp/tmp.WPy7bDvtsI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/one-pod-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.F6mA0VbtAJ + cat /tmp/tmp.WPy7bDvtsI + rm /tmp/tmp.F6mA0VbtAJ /tmp/tmp.WPy7bDvtsI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/statefulset_one-pod-rs0-secret.yml /tmp/tmp.Cl7M6QWuAu/statefulset_one-pod-rs0.yml + log 'compare_kubectl: statefulset/one-pod-rs0 OK' + set +o xtrace [2025-12-24T11:00:48+0000] compare_kubectl: statefulset/one-pod-rs0 OK + run_mongo 'db.serverCmdLineOpts()' clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 + local 'command=db.serverCmdLineOpts()' + local uri=clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' + local replica_set=rs0 + [[ clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 == *cfg* ]] + /usr/sbin/sed -re 's/((Timestamp|BinData|NumberLong)\((.+?\)))/{}/g' + jq .parsed.systemLog ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.PIjHDBIh3e +++ mktemp ++ local LAST_ERR=/tmp/tmp.qjcAZ8op7m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PIjHDBIh3e ++ cat /tmp/tmp.qjcAZ8op7m ++ rm /tmp/tmp.PIjHDBIh3e /tmp/tmp.qjcAZ8op7m ++ return 0 + local client_container=psmdb-client-696897d69b-7mwqw + kubectl_bin exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.LxuerIhyXo ++ mktemp + local LAST_ERR=/tmp/tmp.NMogRz3aFR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LxuerIhyXo + cat /tmp/tmp.NMogRz3aFR + rm /tmp/tmp.LxuerIhyXo /tmp/tmp.NMogRz3aFR + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/compare/serverCmdLineOpts_parsed_systemLog_secret.json /tmp/tmp.Cl7M6QWuAu/parsed_systemLog_secret.json + desc 'create secret and check custom config' + set +o xtrace ----------------------------------------------------------------------------------- create secret and check custom config ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/mongod-secret-2.yml ++ mktemp + local LAST_OUT=/tmp/tmp.w2zt0ubTWQ ++ mktemp + local LAST_ERR=/tmp/tmp.XmjbxnsXzO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2163/e2e-tests/one-pod/conf/mongod-secret-2.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w2zt0ubTWQ secret/one-pod-rs0-mongod configured + cat /tmp/tmp.XmjbxnsXzO + rm /tmp/tmp.w2zt0ubTWQ /tmp/tmp.XmjbxnsXzO + return 0 + wait_cluster_consistency one-pod + local cluster_name=one-pod + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb one-pod -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9iP1zB4Ugq +++ mktemp ++ local LAST_ERR=/tmp/tmp.2x4XUkrS11 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb one-pod -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9iP1zB4Ugq ++ cat /tmp/tmp.2x4XUkrS11 ++ rm /tmp/tmp.9iP1zB4Ugq /tmp/tmp.2x4XUkrS11 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongo 'db.serverCmdLineOpts()' clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 + local 'command=db.serverCmdLineOpts()' + local uri=clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702 == *cfg* ]] + /usr/sbin/sed -re 's/((Timestamp|BinData|NumberLong)\((.+?\)))/{}/g' + jq .parsed.systemLog ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.nEtOifoDWO +++ mktemp ++ local LAST_ERR=/tmp/tmp.6jp7uL6BwH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nEtOifoDWO ++ cat /tmp/tmp.6jp7uL6BwH ++ rm /tmp/tmp.nEtOifoDWO /tmp/tmp.6jp7uL6BwH ++ return 0 + local client_container=psmdb-client-696897d69b-7mwqw + kubectl_bin exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZLhskkeeBr ++ mktemp + local LAST_ERR=/tmp/tmp.ytOtMY7LTu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=2 + set -e + '[' 2 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ZLhskkeeBr + cat /tmp/tmp.ytOtMY7LTu DNSHostNotFound: Failed to look up service "_mongodb._tcp.one-pod-rs0.one-pod-18702.svc.cluster.local": Success try 'mongo --help' for more information command terminated with exit code 2 + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ZLhskkeeBr + cat /tmp/tmp.ytOtMY7LTu command terminated with exit code 1 + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7mwqw -- bash -c 'printf '\''db.serverCmdLineOpts()\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@one-pod-rs0.one-pod-18702.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=2 + set -e + '[' 2 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ZLhskkeeBr + cat /tmp/tmp.ytOtMY7LTu DNSHostNotFound: Failed to look up service "_mongodb._tcp.one-pod-rs0.one-pod-18702.svc.cluster.local": Success try 'mongo --help' for more information command terminated with exit code 2 + sleep 8 + cat /tmp/tmp.ZLhskkeeBr + cat /tmp/tmp.ytOtMY7LTu DNSHostNotFound: Failed to look up service "_mongodb._tcp.one-pod-rs0.one-pod-18702.svc.cluster.local": Success try 'mongo --help' for more information command terminated with exit code 2 + rm /tmp/tmp.ZLhskkeeBr /tmp/tmp.ytOtMY7LTu + return 2 jq: parse error: Invalid numeric literal at line 2, column 6
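# ---------------------------------------------------------------------------
# Editor's note on the failure above: all three retries of the mongo exec died
# with DNSHostNotFound on the SRV record
# "_mongodb._tcp.one-pod-rs0.one-pod-18702.svc.cluster.local", so run_mongo
# returned exit code 2 and emitted no JSON. The trailing
# "jq: parse error: Invalid numeric literal at line 2, column 6" is jq choking
# on the mongo error text instead of JSON, not a separate bug. A plausible
# cause (an assumption, not provable from this log alone): applying
# mongod-secret-2 restarts the single-member replica set, and the status poll
# reported "ready" on its very first check, so the client raced the pod going
# down and the headless service had no SRV endpoints to return.
#
# Every kubectl call in this trace goes through a retry wrapper. A minimal
# sketch reconstructed from the logged mktemp/seq/sleep/cat sequence (argument
# handling inside the real helper is not visible here, so it is assumed):
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                  # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break                            # success: stop retrying
        fi
        cat "$LAST_OUT" "$LAST_ERR"
        sleep $((timeout * i))               # 0s, 4s, 8s backoff, as logged
    done
    cat "$LAST_OUT" "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
# ---------------------------------------------------------------------------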
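# ---------------------------------------------------------------------------
# Editor's note: the compare_kubectl steps in this trace all follow the same
# normalize-then-diff pattern: dump the live object, strip volatile fields
# (uids, timestamps, resourceVersion, status, the generated namespace) with
# yq, then diff against a checked-in expected file. A condensed sketch using a
# few of the logged yq expressions ($tmp_dir and $expected_dir stand in for
# the mktemp dir and the e2e-tests compare dir):
kubectl get -o yaml statefulset/one-pod-rs0 \
    | yq eval '
        del(.metadata.managedFields)
      | del(.metadata.resourceVersion)
      | del(.status)
      | (.. | select(tag == "!!str")) |= sub("one-pod-18702", "NAME_SPACE")
      ' - >"$tmp_dir/statefulset_one-pod-rs0.yml"
diff -u "$expected_dir/statefulset_one-pod-rs0-secret.yml" \
    "$tmp_dir/statefulset_one-pod-rs0.yml"
# ---------------------------------------------------------------------------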