Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/logs/service-per-pod.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + main + create_infra service-per-pod-15836 + local ns=service-per-pod-15836 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.pODw2AGzLi ++ mktemp + local LAST_ERR=/tmp/tmp.tUKoIZrR8Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pODw2AGzLi customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.tUKoIZrR8Q + rm /tmp/tmp.pODw2AGzLi /tmp/tmp.tUKoIZrR8Q + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.HAD9wkDwWq ++ mktemp + local LAST_ERR=/tmp/tmp.NWCRkUB49s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HAD9wkDwWq + cat /tmp/tmp.NWCRkUB49s + rm /tmp/tmp.HAD9wkDwWq /tmp/tmp.NWCRkUB49s + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.TSRr9DyLV4 ++ mktemp + local LAST_ERR=/tmp/tmp.Wnm66PBns6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TSRr9DyLV4 + cat /tmp/tmp.Wnm66PBns6 + rm /tmp/tmp.TSRr9DyLV4 /tmp/tmp.Wnm66PBns6 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3XLO0txdjk ++ mktemp + local LAST_ERR=/tmp/tmp.JbLw3W6Xfw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3XLO0txdjk + cat /tmp/tmp.JbLw3W6Xfw + rm /tmp/tmp.3XLO0txdjk /tmp/tmp.JbLw3W6Xfw + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.RdZFQripoh ++ mktemp + local LAST_ERR=/tmp/tmp.jOuBaSkUqF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RdZFQripoh clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.jOuBaSkUqF + rm /tmp/tmp.RdZFQripoh /tmp/tmp.jOuBaSkUqF + return 0 + check_crd_for_deletion PR-1912-ab1be45a + local git_tag=PR-1912-ab1be45a ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1912-ab1be45a/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kCS1QBoF1X +++ mktemp ++ local LAST_ERR=/tmp/tmp.sN1zDsXulq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.kCS1QBoF1X ++ cat /tmp/tmp.sN1zDsXulq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.kCS1QBoF1X ++ cat /tmp/tmp.sN1zDsXulq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.kCS1QBoF1X ++ cat /tmp/tmp.sN1zDsXulq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.kCS1QBoF1X ++ cat /tmp/tmp.sN1zDsXulq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.kCS1QBoF1X /tmp/tmp.sN1zDsXulq ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh.org ++ awk '{print $1}' ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.HZ1tiaokBm + local LAST_OUT=/tmp/tmp.5AOLddNH87 ++ mktemp + local LAST_ERR=/tmp/tmp.Big9F1zoGm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in 
'$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.u370rkjXN1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5AOLddNH87 + cat /tmp/tmp.Big9F1zoGm + rm /tmp/tmp.5AOLddNH87 /tmp/tmp.Big9F1zoGm + return 0 namespace "cert-manager" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "service-per-pod-14790" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HZ1tiaokBm namespace "psmdb-operator" deleted + cat /tmp/tmp.u370rkjXN1 + rm /tmp/tmp.HZ1tiaokBm /tmp/tmp.u370rkjXN1 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.penzLIRE0d ++ mktemp + local LAST_ERR=/tmp/tmp.rl5DbfZPCG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.penzLIRE0d + cat /tmp/tmp.rl5DbfZPCG + rm /tmp/tmp.penzLIRE0d /tmp/tmp.rl5DbfZPCG + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3ZOG5ImN0y ++ mktemp + local LAST_ERR=/tmp/tmp.FzhADHrYHg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ZOG5ImN0y namespace/psmdb-operator created + cat /tmp/tmp.FzhADHrYHg + rm /tmp/tmp.3ZOG5ImN0y /tmp/tmp.FzhADHrYHg + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2HR0W4U8W0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3hmGvnDA37 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2HR0W4U8W0 ++ cat /tmp/tmp.3hmGvnDA37 ++ rm /tmp/tmp.2HR0W4U8W0 /tmp/tmp.3hmGvnDA37 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mtHz0YPmMA ++ mktemp + local LAST_ERR=/tmp/tmp.MnqptXo0cV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster6 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mtHz0YPmMA Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster6" modified. 
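-----------------------------------------------------------------------------------
editor's note: every kubectl call in this log runs through a kubectl_bin retry
wrapper, which is what produces the recurring mktemp / LAST_OUT / LAST_ERR /
seq 0 2 noise. A minimal sketch of that wrapper as reconstructed from the trace
(the real helper may differ in details such as the error-handling condition):
-----------------------------------------------------------------------------------
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        set -e
        [ "${exit_status}" -eq 0 ] && break
        cat "${LAST_OUT}" "${LAST_ERR}"
        sleep $((timeout * i))    # backs off 0s, 4s, 8s across the three attempts
    done
    cat "${LAST_OUT}"
    cat "${LAST_ERR}"
    rm "${LAST_OUT}" "${LAST_ERR}"
    return "${exit_status}"
}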
+ cat /tmp/tmp.MnqptXo0cV + rm /tmp/tmp.mtHz0YPmMA /tmp/tmp.MnqptXo0cV + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.506GPuHW3Z ++ mktemp + local LAST_ERR=/tmp/tmp.fS11Gss80v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.506GPuHW3Z customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.fS11Gss80v + rm /tmp/tmp.506GPuHW3Z /tmp/tmp.fS11Gss80v + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + kubectl_bin apply -n psmdb-operator -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.wtyPDVRYji ++ mktemp + local LAST_ERR=/tmp/tmp.l27wNZ0fzj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wtyPDVRYji clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.l27wNZ0fzj + rm /tmp/tmp.wtyPDVRYji /tmp/tmp.l27wNZ0fzj + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.yamh6kiEco ++ mktemp + local LAST_ERR=/tmp/tmp.DykR8kP66C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yamh6kiEco deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.DykR8kP66C + rm /tmp/tmp.yamh6kiEco /tmp/tmp.DykR8kP66C + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Eh3bmavPUu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zzxe0a4TNx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Eh3bmavPUu ++ cat /tmp/tmp.Zzxe0a4TNx ++ rm /tmp/tmp.Eh3bmavPUu /tmp/tmp.Zzxe0a4TNx ++ return 0 + wait_pod percona-server-mongodb-operator-5b4f87c7bd-n4qgl + local pod=percona-server-mongodb-operator-5b4f87c7bd-n4qgl + set +o xtrace waiting for pod/percona-server-mongodb-operator-5b4f87c7bd-n4qgl to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.I4gfS3MFdE +++ mktemp ++ local LAST_ERR=/tmp/tmp.dMnpSf9ve4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I4gfS3MFdE ++ cat /tmp/tmp.dMnpSf9ve4 ++ rm /tmp/tmp.I4gfS3MFdE /tmp/tmp.dMnpSf9ve4 ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-5b4f87c7bd-n4qgl ++ mktemp + local LAST_OUT=/tmp/tmp.0fLWQctfZB ++ mktemp + local LAST_ERR=/tmp/tmp.NuGC0kDuRw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-5b4f87c7bd-n4qgl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0fLWQctfZB + cat /tmp/tmp.NuGC0kDuRw + rm /tmp/tmp.0fLWQctfZB /tmp/tmp.NuGC0kDuRw + return 0 2025-05-21T13:10:56.914Z INFO setup Manager starting up {"gitCommit": "ab1be45a35452d5d20912637f506f22dd6dd4c33", "gitBranch": "PR-1912-ab1be45a", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"} + create_namespace service-per-pod-15836 + local namespace=service-per-pod-15836 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were 
provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces service-per-pod-15836' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces service-per-pod-15836 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace service-per-pod-15836 --ignore-not-found ++ mktemp + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + local LAST_OUT=/tmp/tmp.htcq1dIMLP + kubectl_bin get ns ++ mktemp + local LAST_ERR=/tmp/tmp.xIvMXlASyp + local exit_status=0 + local timeout=4 ++ mktemp + local LAST_OUT=/tmp/tmp.BsQPFqryTN ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace service-per-pod-15836 --ignore-not-found ++ mktemp + local LAST_ERR=/tmp/tmp.ra5VXCK6TL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BsQPFqryTN + cat /tmp/tmp.ra5VXCK6TL + rm /tmp/tmp.BsQPFqryTN /tmp/tmp.ra5VXCK6TL + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.htcq1dIMLP + cat /tmp/tmp.xIvMXlASyp + rm /tmp/tmp.htcq1dIMLP /tmp/tmp.xIvMXlASyp + return 0 + kubectl_bin wait --for=delete namespace service-per-pod-15836 ++ mktemp + local LAST_OUT=/tmp/tmp.hCsgDmoiIR ++ mktemp + local LAST_ERR=/tmp/tmp.2mnBZ9cvti + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace service-per-pod-15836 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hCsgDmoiIR + cat /tmp/tmp.2mnBZ9cvti + rm /tmp/tmp.hCsgDmoiIR /tmp/tmp.2mnBZ9cvti + return 0 + desc 'create namespace service-per-pod-15836' + set +o xtrace ----------------------------------------------------------------------------------- create namespace service-per-pod-15836 ----------------------------------------------------------------------------------- + kubectl_bin create namespace service-per-pod-15836 ++ mktemp + local LAST_OUT=/tmp/tmp.GVBHoQHEBt ++ mktemp + local LAST_ERR=/tmp/tmp.vsqq660gr8 + 
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace service-per-pod-15836 namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GVBHoQHEBt namespace/service-per-pod-15836 created + cat /tmp/tmp.vsqq660gr8 + rm /tmp/tmp.GVBHoQHEBt /tmp/tmp.vsqq660gr8 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.I2jvsiNhZj +++ mktemp ++ local LAST_ERR=/tmp/tmp.MlRLOgJDu6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I2jvsiNhZj ++ cat /tmp/tmp.MlRLOgJDu6 ++ rm /tmp/tmp.I2jvsiNhZj /tmp/tmp.MlRLOgJDu6 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster6 --namespace=service-per-pod-15836 ++ mktemp + local LAST_OUT=/tmp/tmp.KLA0kR34xP ++ mktemp + local LAST_ERR=/tmp/tmp.hH9QbtF96d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster6 --namespace=service-per-pod-15836 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KLA0kR34xP Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1912-ab1be45a-6-cluster6" modified. + cat /tmp/tmp.hH9QbtF96d + rm /tmp/tmp.KLA0kR34xP /tmp/tmp.hH9QbtF96d + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.CiJ5UvndQ4 ++ mktemp + local LAST_ERR=/tmp/tmp.4UaIvrHClO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CiJ5UvndQ4 namespace/cert-manager created + cat /tmp/tmp.4UaIvrHClO + rm /tmp/tmp.CiJ5UvndQ4 /tmp/tmp.4UaIvrHClO + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.xyVMIx0L9b ++ mktemp + local LAST_ERR=/tmp/tmp.nSIqrgjDNS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xyVMIx0L9b namespace/cert-manager labeled + cat /tmp/tmp.nSIqrgjDNS + rm /tmp/tmp.xyVMIx0L9b /tmp/tmp.nSIqrgjDNS + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.vJY2FbLO3E ++ mktemp + local LAST_ERR=/tmp/tmp.DmsUF2qd1W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vJY2FbLO3E namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.DmsUF2qd1W Warning: resource 
namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.vJY2FbLO3E /tmp/tmp.DmsUF2qd1W + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.MaRpaI4ZvB ++ mktemp + local LAST_ERR=/tmp/tmp.vI45FWRQ8v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MaRpaI4ZvB pod/cert-manager-6687d8765c-6rzw9 condition met pod/cert-manager-cainjector-764498cfc8-rr6h5 condition met pod/cert-manager-webhook-74c74b87d7-ctm2s condition met + cat /tmp/tmp.vI45FWRQ8v + rm /tmp/tmp.MaRpaI4ZvB /tmp/tmp.vI45FWRQ8v + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.icvLdBs97H ++ mktemp + local LAST_ERR=/tmp/tmp.haEz60JqHa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.icvLdBs97H deployment.apps/psmdb-client created secret/some-users created + cat /tmp/tmp.haEz60JqHa + rm /tmp/tmp.icvLdBs97H /tmp/tmp.haEz60JqHa + return 0 + desc 'check ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- check ClusterIP ----------------------------------------------------------------------------------- + check_cr_config cluster-ip-rs0 + local cluster=cluster-ip-rs0 + desc 'create PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + local LAST_OUT=/tmp/tmp.FpxdCXc3xN + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' + local LAST_ERR=/tmp/tmp.0EMnUxPXej + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + 
set +e + kubectl apply -f - + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FpxdCXc3xN perconaservermongodb.psmdb.percona.com/cluster-ip created + cat /tmp/tmp.0EMnUxPXej + rm /tmp/tmp.FpxdCXc3xN /tmp/tmp.0EMnUxPXej + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cluster-ip-rs0 3 false + local name=cluster-ip-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cluster-ip ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod cluster-ip-rs0-0 + local pod=cluster-ip-rs0-0 + set +o xtrace waiting for pod/cluster-ip-rs0-0 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod cluster-ip-rs0-1 + local pod=cluster-ip-rs0-1 + set +o xtrace waiting for pod/cluster-ip-rs0-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZFBgPJdNbE +++ mktemp ++ local LAST_ERR=/tmp/tmp.3PLO9mdZZt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZFBgPJdNbE ++ cat /tmp/tmp.3PLO9mdZZt ++ rm /tmp/tmp.ZFBgPJdNbE /tmp/tmp.3PLO9mdZZt ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cluster-ip-rs0-2 + local pod=cluster-ip-rs0-2 + set +o xtrace waiting for pod/cluster-ip-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5mLeJ44HS1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9qCb7ue3d8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5mLeJ44HS1 ++ cat /tmp/tmp.9qCb7ue3d8 ++ rm /tmp/tmp.5mLeJ44HS1 /tmp/tmp.9qCb7ue3d8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/cluster-ip-rs0 + local resource=statefulset/cluster-ip-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml + local new_result=/tmp/tmp.XUCiRCZY5g/statefulset_cluster-ip-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/cluster-ip-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.91RO6WEFn7 ++ mktemp + local LAST_ERR=/tmp/tmp.h6tGZwKl8p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/cluster-ip-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.91RO6WEFn7 + cat /tmp/tmp.h6tGZwKl8p + rm /tmp/tmp.91RO6WEFn7 /tmp/tmp.h6tGZwKl8p + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/statefulset_cluster-ip-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/statefulset_cluster-ip-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/statefulset_cluster-ip-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml /tmp/tmp.XUCiRCZY5g/statefulset_cluster-ip-rs0.yml + compare_kubectl service/cluster-ip-rs0-0 + local resource=service/cluster-ip-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml + local new_result=/tmp/tmp.XUCiRCZY5g/service_cluster-ip-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml service/cluster-ip-rs0-0 ++ mktemp + local LAST_OUT=/tmp/tmp.REDplhOHJg ++ mktemp + local LAST_ERR=/tmp/tmp.Mo2R9Wmp3G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/cluster-ip-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.REDplhOHJg + cat /tmp/tmp.Mo2R9Wmp3G + rm /tmp/tmp.REDplhOHJg /tmp/tmp.Mo2R9Wmp3G + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/service_cluster-ip-rs0-0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/service_cluster-ip-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/service_cluster-ip-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml /tmp/tmp.XUCiRCZY5g/service_cluster-ip-rs0-0.yml ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NL3x8fySfs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gYyoLo4MFH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NL3x8fySfs +++ cat /tmp/tmp.gYyoLo4MFH +++ rm /tmp/tmp.NL3x8fySfs /tmp/tmp.gYyoLo4MFH +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lf0HnXlsDW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PjUI9QLGG2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lf0HnXlsDW +++ cat /tmp/tmp.PjUI9QLGG2 +++ rm /tmp/tmp.lf0HnXlsDW /tmp/tmp.PjUI9QLGG2 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nJJvEHc0sw +++ mktemp ++ local LAST_ERR=/tmp/tmp.BDObd0OIXy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nJJvEHc0sw ++ cat /tmp/tmp.BDObd0OIXy ++ rm /tmp/tmp.nJJvEHc0sw /tmp/tmp.BDObd0OIXy ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 
'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.q1bJzKqBtu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.77TsgLUNz9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.q1bJzKqBtu +++ cat /tmp/tmp.77TsgLUNz9 +++ rm /tmp/tmp.q1bJzKqBtu /tmp/tmp.77TsgLUNz9 +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CubSoveQRf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DHELBlGsvP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CubSoveQRf +++ cat /tmp/tmp.DHELBlGsvP +++ rm /tmp/tmp.CubSoveQRf /tmp/tmp.DHELBlGsvP +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.euHjGAxJkf +++ mktemp ++ local LAST_ERR=/tmp/tmp.THE7zloiGC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.euHjGAxJkf ++ cat /tmp/tmp.THE7zloiGC ++ rm /tmp/tmp.euHjGAxJkf /tmp/tmp.THE7zloiGC ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XnhhE55HYd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CItQnmVW2O +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XnhhE55HYd +++ cat /tmp/tmp.CItQnmVW2O +++ rm /tmp/tmp.XnhhE55HYd /tmp/tmp.CItQnmVW2O +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.peWzZnY0zS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wG8xjtvHb7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.peWzZnY0zS +++ cat /tmp/tmp.wG8xjtvHb7 +++ rm /tmp/tmp.peWzZnY0zS /tmp/tmp.wG8xjtvHb7 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MJeyYG3De2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dws9VftZ5m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MJeyYG3De2 ++ cat /tmp/tmp.dws9VftZ5m ++ rm /tmp/tmp.MJeyYG3De2 
/tmp/tmp.dws9VftZ5m ++ return 0 ++ return + local URI=34.118.229.228,34.118.232.12,34.118.228.79 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.229.228,34.118.232.12,34.118.228.79 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.229.228,34.118.232.12,34.118.228.79 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZZlm7U2IzG +++ mktemp ++ local LAST_ERR=/tmp/tmp.OGSQ2nQ3sa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZZlm7U2IzG ++ cat /tmp/tmp.OGSQ2nQ3sa ++ rm /tmp/tmp.ZZlm7U2IzG /tmp/tmp.OGSQ2nQ3sa ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ userAdmin:userAdmin123456@34.118.229.228,34.118.232.12,34.118.228.79 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.229.228,34.118.232.12,34.118.228.79:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jl9mcGQ9hD ++ mktemp + local LAST_ERR=/tmp/tmp.hkrYL1ad3M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.229.228,34.118.232.12,34.118.228.79:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jl9mcGQ9hD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.229.228:27017,34.118.232.12:27017,34.118.228.79:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6834880e-c223-4cce-bad0-6afbaf39aea6") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.hkrYL1ad3M + rm /tmp/tmp.jl9mcGQ9hD /tmp/tmp.hkrYL1ad3M + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.229.228,34.118.232.12,34.118.228.79 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.229.228,34.118.232.12,34.118.228.79 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.NssemGzYmq +++ mktemp ++ local LAST_ERR=/tmp/tmp.ASfzraE05L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NssemGzYmq ++ cat /tmp/tmp.ASfzraE05L ++ rm /tmp/tmp.NssemGzYmq /tmp/tmp.ASfzraE05L ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.229.228,34.118.232.12,34.118.228.79 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.229.228,34.118.232.12,34.118.228.79:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.idpBraVlEY ++ mktemp + local LAST_ERR=/tmp/tmp.LzXpe7rp4g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.229.228,34.118.232.12,34.118.228.79:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.idpBraVlEY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.229.228:27017,34.118.232.12:27017,34.118.228.79:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f1c7f7fa-17aa-47c3-9f85-7a3cba6e9530") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.LzXpe7rp4g + rm /tmp/tmp.idpBraVlEY /tmp/tmp.LzXpe7rp4g + return 0 + sleep 30 ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OTz5md9ROM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NmcoZBLrHi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OTz5md9ROM +++ cat /tmp/tmp.NmcoZBLrHi +++ rm /tmp/tmp.OTz5md9ROM /tmp/tmp.NmcoZBLrHi +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t4E7UgW1Qw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ribb0TQHAc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.t4E7UgW1Qw +++ cat /tmp/tmp.ribb0TQHAc +++ rm /tmp/tmp.t4E7UgW1Qw /tmp/tmp.ribb0TQHAc +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OcVRMdErGq +++ mktemp ++ local LAST_ERR=/tmp/tmp.cDolwAsEYj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OcVRMdErGq ++ cat /tmp/tmp.cDolwAsEYj ++ rm /tmp/tmp.OcVRMdErGq /tmp/tmp.cDolwAsEYj ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.229.228 '' :27017 + local command=find + local uri=myApp:myPass@34.118.229.228 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:17:01+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.229.228 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.229.228 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jxhGtheCHc +++ mktemp ++ local LAST_ERR=/tmp/tmp.NYl6gYvgk1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jxhGtheCHc ++ cat /tmp/tmp.NYl6gYvgk1 ++ rm /tmp/tmp.jxhGtheCHc /tmp/tmp.NYl6gYvgk1 ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.229.228 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.229.228:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.K8SqGbNNhq ++ mktemp + local LAST_ERR=/tmp/tmp.O6dR4Qxwyh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.229.228:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K8SqGbNNhq + cat /tmp/tmp.O6dR4Qxwyh + rm /tmp/tmp.K8SqGbNNhq /tmp/tmp.O6dR4Qxwyh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ModANbxt61 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.m4cI9qnAq2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ModANbxt61 +++ cat /tmp/tmp.m4cI9qnAq2 +++ rm /tmp/tmp.ModANbxt61 /tmp/tmp.m4cI9qnAq2 +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iwk5FK9FRK ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.AVml5lTSTM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iwk5FK9FRK +++ cat /tmp/tmp.AVml5lTSTM +++ rm /tmp/tmp.iwk5FK9FRK /tmp/tmp.AVml5lTSTM +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n1owGZJpCa +++ mktemp ++ local LAST_ERR=/tmp/tmp.eOBaWyPb9P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n1owGZJpCa ++ cat /tmp/tmp.eOBaWyPb9P ++ rm /tmp/tmp.n1owGZJpCa /tmp/tmp.eOBaWyPb9P ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.232.12 '' :27017 + local command=find + local uri=myApp:myPass@34.118.232.12 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:17:09+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.232.12 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.232.12 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.BpyDpfGfvL +++ mktemp ++ local LAST_ERR=/tmp/tmp.6eX0wgtw9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BpyDpfGfvL ++ cat /tmp/tmp.6eX0wgtw9u ++ rm /tmp/tmp.BpyDpfGfvL /tmp/tmp.6eX0wgtw9u ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.232.12 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.12:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FanD7a9OJG ++ mktemp + local LAST_ERR=/tmp/tmp.Bsy4Ezud9P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.12:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FanD7a9OJG + cat /tmp/tmp.Bsy4Ezud9P + rm /tmp/tmp.FanD7a9OJG /tmp/tmp.Bsy4Ezud9P + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 
'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XYv0VWW1SU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1ljp4XBi4o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XYv0VWW1SU +++ cat /tmp/tmp.1ljp4XBi4o +++ rm /tmp/tmp.XYv0VWW1SU /tmp/tmp.1ljp4XBi4o +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ht3jkT2W3M ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KrnueF2qic +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ht3jkT2W3M +++ cat /tmp/tmp.KrnueF2qic +++ rm /tmp/tmp.ht3jkT2W3M /tmp/tmp.KrnueF2qic +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J9YBgZCRos +++ mktemp ++ local LAST_ERR=/tmp/tmp.dwJwAFI1S3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J9YBgZCRos ++ cat /tmp/tmp.dwJwAFI1S3 ++ rm /tmp/tmp.J9YBgZCRos /tmp/tmp.dwJwAFI1S3 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.228.79 '' :27017 + local command=find + local uri=myApp:myPass@34.118.228.79 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:17:17+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.228.79 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.228.79 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aLigJIaxVW +++ mktemp ++ local LAST_ERR=/tmp/tmp.DuveDpyzxz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aLigJIaxVW ++ cat /tmp/tmp.DuveDpyzxz ++ rm /tmp/tmp.aLigJIaxVW /tmp/tmp.DuveDpyzxz ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.228.79 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.228.79:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Nuo5Bxgq9S ++ mktemp + local LAST_ERR=/tmp/tmp.BT6YV0btU7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.228.79:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nuo5Bxgq9S + cat /tmp/tmp.BT6YV0btU7 + rm /tmp/tmp.Nuo5Bxgq9S /tmp/tmp.BT6YV0btU7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find + [[ cluster-ip-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'delete PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.CvTpPbE0m6 ++ mktemp + local LAST_ERR=/tmp/tmp.qslSF8IZnT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CvTpPbE0m6 perconaservermongodb.psmdb.percona.com "cluster-ip" deleted + cat /tmp/tmp.qslSF8IZnT + rm /tmp/tmp.CvTpPbE0m6 /tmp/tmp.qslSF8IZnT + return 0 + desc 'check LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- check LoadBalancer ----------------------------------------------------------------------------------- + check_cr_config local-balancer-rs0 + local cluster=local-balancer-rs0 + desc 'create PSMDB cluster local-balancer-rs0' + set +o xtrace 
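-----------------------------------------------------------------------------------
note: how the find() output is compared (sketch)
-----------------------------------------------------------------------------------
The ClusterIP phase above repeats one pattern per pod: resolve the service IP, run db.test.find() through the psmdb-client pod, strip the noisy shell lines, and diff the result against compare/find.json. A minimal sketch of that normalization, reconstructed from the trace - the egrep and sed expressions are copied verbatim from the log, while normalize_mongo_output, service_ip, tmp_dir and test_dir are illustrative names, not the helpers the real script uses:

# strip mongo shell banners/warnings and volatile values (ObjectId, namespace
# suffix) so the output diffs cleanly against the stored expectation
normalize_mongo_output() {
	egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \
		| sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
}

# usage as the trace suggests: capture, normalize, then a plain diff -u
run_mongo 'use myApp\n db.test.find()' "myApp:myPass@${service_ip}" mongodb :27017 \
	| normalize_mongo_output >"${tmp_dir}/find"
diff -u "${test_dir}/compare/find.json" "${tmp_dir}/find"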
----------------------------------------------------------------------------------- create PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' + local LAST_OUT=/tmp/tmp.RPTqVPuIdu + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + local LAST_ERR=/tmp/tmp.nQc2spOxA3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RPTqVPuIdu perconaservermongodb.psmdb.percona.com/local-balancer created + cat /tmp/tmp.nQc2spOxA3 + rm /tmp/tmp.RPTqVPuIdu /tmp/tmp.nQc2spOxA3 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running local-balancer-rs0 3 false + local name=local-balancer-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=local-balancer ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod local-balancer-rs0-0 + local pod=local-balancer-rs0-0 + set +o xtrace waiting for pod/local-balancer-rs0-0 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod local-balancer-rs0-1 + local pod=local-balancer-rs0-1 + set +o xtrace waiting for pod/local-balancer-rs0-1 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.riFZW7mxju +++ mktemp ++ local LAST_ERR=/tmp/tmp.QVQx9ln831 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.riFZW7mxju ++ cat /tmp/tmp.QVQx9ln831 ++ rm /tmp/tmp.riFZW7mxju /tmp/tmp.QVQx9ln831 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod local-balancer-rs0-2 + local pod=local-balancer-rs0-2 + set +o xtrace waiting for pod/local-balancer-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lN2zEp0Jeq +++ mktemp ++ local LAST_ERR=/tmp/tmp.jz0l9wQLbb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' 
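-----------------------------------------------------------------------------------
note: how apply_cluster renders the CR (sketch)
-----------------------------------------------------------------------------------
apply_cluster above does not apply the conf file as-is: cat_config pipes it through a chain of yq edits that pin every image to the build under test before kubectl apply -f - reads it from stdin. A sketch of that chain, using the exact override values visible in the trace; the real cat_config in e2e-tests/functions may apply the filters in a different order:

# render a test CR with per-build image overrides, then apply it
cat_config() {
	cat "$1" \
		| yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
		| yq eval '.spec.upgradeOptions.apply="Never"' \
		| yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' \
		| yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
		| yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
}

cat_config conf/local-balancer-rs0.yml | kubectl apply -f -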
++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lN2zEp0Jeq ++ cat /tmp/tmp.jz0l9wQLbb ++ rm /tmp/tmp.lN2zEp0Jeq /tmp/tmp.jz0l9wQLbb ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/local-balancer-rs0 + local resource=statefulset/local-balancer-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml + local new_result=/tmp/tmp.XUCiRCZY5g/statefulset_local-balancer-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/local-balancer-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ecWLp8pix3 ++ mktemp + local LAST_ERR=/tmp/tmp.eNs5zNJuRC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/local-balancer-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ecWLp8pix3 + cat /tmp/tmp.eNs5zNJuRC + rm /tmp/tmp.ecWLp8pix3 /tmp/tmp.eNs5zNJuRC + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/statefulset_local-balancer-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/statefulset_local-balancer-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/statefulset_local-balancer-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml /tmp/tmp.XUCiRCZY5g/statefulset_local-balancer-rs0.yml + compare_kubectl service/local-balancer-rs0-0 + local resource=service/local-balancer-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml + local new_result=/tmp/tmp.XUCiRCZY5g/service_local-balancer-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/local-balancer-rs0-0 ++ mktemp + local LAST_OUT=/tmp/tmp.PSrONIqLuh ++ mktemp + local LAST_ERR=/tmp/tmp.gzbW0GP0NN + local exit_status=0 + local timeout=4 ++ seq 0 2 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/local-balancer-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PSrONIqLuh + cat /tmp/tmp.gzbW0GP0NN + rm /tmp/tmp.PSrONIqLuh /tmp/tmp.gzbW0GP0NN + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/service_local-balancer-rs0-0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/service_local-balancer-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/service_local-balancer-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml /tmp/tmp.XUCiRCZY5g/service_local-balancer-rs0-0.yml ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FXZJoB7kjU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.m77rQ9Qo5E +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FXZJoB7kjU +++ cat /tmp/tmp.m77rQ9Qo5E +++ rm /tmp/tmp.FXZJoB7kjU /tmp/tmp.m77rQ9Qo5E +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DjRU7CFqW2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2V8MPTpiN6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DjRU7CFqW2 +++ cat /tmp/tmp.2V8MPTpiN6 +++ rm /tmp/tmp.DjRU7CFqW2 /tmp/tmp.2V8MPTpiN6 +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5yit8ZFiLe +++ mktemp ++ local LAST_ERR=/tmp/tmp.j57o5zXw2E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ 
set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5yit8ZFiLe ++ cat /tmp/tmp.j57o5zXw2E ++ rm /tmp/tmp.5yit8ZFiLe /tmp/tmp.j57o5zXw2E ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0mXM8rYduP +++ mktemp ++ local LAST_ERR=/tmp/tmp.pCmiat8sZT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0mXM8rYduP ++ cat /tmp/tmp.pCmiat8sZT ++ rm /tmp/tmp.0mXM8rYduP /tmp/tmp.pCmiat8sZT ++ return 0 ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pCGH7BKQro ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GtBvRvhOOk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pCGH7BKQro +++ cat /tmp/tmp.GtBvRvhOOk +++ rm /tmp/tmp.pCGH7BKQro /tmp/tmp.GtBvRvhOOk +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.10TnKHXtQO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.j5QgaNZLTi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.10TnKHXtQO +++ cat /tmp/tmp.j5QgaNZLTi +++ rm /tmp/tmp.10TnKHXtQO /tmp/tmp.j5QgaNZLTi +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pDlG7j9qQD +++ mktemp ++ local LAST_ERR=/tmp/tmp.dwR39oYwRq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pDlG7j9qQD ++ cat /tmp/tmp.dwR39oYwRq ++ rm /tmp/tmp.pDlG7j9qQD /tmp/tmp.dwR39oYwRq ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mIirEDXoBs +++ mktemp ++ local LAST_ERR=/tmp/tmp.ymQTGbFfUd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mIirEDXoBs ++ cat /tmp/tmp.ymQTGbFfUd ++ rm /tmp/tmp.mIirEDXoBs /tmp/tmp.ymQTGbFfUd ++ return 0 ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 
'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kBNWPU7ZSJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fX0VygfDmY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kBNWPU7ZSJ +++ cat /tmp/tmp.fX0VygfDmY +++ rm /tmp/tmp.kBNWPU7ZSJ /tmp/tmp.fX0VygfDmY +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RKYo8fnzZa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.372Xr4kO8Z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RKYo8fnzZa +++ cat /tmp/tmp.372Xr4kO8Z +++ rm /tmp/tmp.RKYo8fnzZa /tmp/tmp.372Xr4kO8Z +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 
'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zKAWzArNO1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6Cz0wtS62t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zKAWzArNO1 ++ cat /tmp/tmp.6Cz0wtS62t ++ rm /tmp/tmp.zKAWzArNO1 /tmp/tmp.6Cz0wtS62t ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XcLiIV44V6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hdNMh0ehtw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XcLiIV44V6 ++ cat /tmp/tmp.hdNMh0ehtw ++ rm /tmp/tmp.XcLiIV44V6 /tmp/tmp.hdNMh0ehtw ++ return 0 + local URI=34.133.63.68,35.232.85.49,35.224.188.239 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.133.63.68,35.232.85.49,35.224.188.239 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.133.63.68,35.232.85.49,35.224.188.239 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f4Yrsfy9Bj +++ mktemp ++ local LAST_ERR=/tmp/tmp.iiGzRU49sO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f4Yrsfy9Bj ++ cat /tmp/tmp.iiGzRU49sO ++ rm /tmp/tmp.f4Yrsfy9Bj /tmp/tmp.iiGzRU49sO ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ userAdmin:userAdmin123456@34.133.63.68,35.232.85.49,35.224.188.239 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.133.63.68,35.232.85.49,35.224.188.239:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.geVppfsyuw ++ mktemp + local LAST_ERR=/tmp/tmp.vL7CIaXKWI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.133.63.68,35.232.85.49,35.224.188.239:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.geVppfsyuw Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://34.133.63.68:27017,35.232.85.49:27017,35.224.188.239:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("07a77de0-fe39-4508-bd36-6a763e7f1a87") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.vL7CIaXKWI + rm /tmp/tmp.geVppfsyuw /tmp/tmp.vL7CIaXKWI + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.133.63.68,35.232.85.49,35.224.188.239 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.133.63.68,35.232.85.49,35.224.188.239 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FevRs6sEX2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kEVQJxC8zV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FevRs6sEX2 ++ cat /tmp/tmp.kEVQJxC8zV ++ rm /tmp/tmp.FevRs6sEX2 /tmp/tmp.kEVQJxC8zV ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.133.63.68,35.232.85.49,35.224.188.239 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.133.63.68,35.232.85.49,35.224.188.239:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IUIMwKWTPl ++ mktemp + local LAST_ERR=/tmp/tmp.OAY8tUjFNj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.133.63.68,35.232.85.49,35.224.188.239:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IUIMwKWTPl Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.133.63.68:27017,35.232.85.49:27017,35.224.188.239:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c295b45e-cd41-46ab-be47-f62c1fc03ac1") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.OAY8tUjFNj + rm /tmp/tmp.IUIMwKWTPl /tmp/tmp.OAY8tUjFNj + return 0 + sleep 30 ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OCfb4TPeMy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PMYPWH1jCn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e 
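-----------------------------------------------------------------------------------
note: how get_service_ip resolves an address (sketch)
-----------------------------------------------------------------------------------
get_service_ip branches on the service type: for ClusterIP it reads .spec.clusterIP directly, while for LoadBalancer it polls .status.loadBalancer.ingress once per second until the cloud provider publishes an ip or hostname - the long run of sleep 1 retries for local-balancer-rs0-2 earlier is exactly that loop. A condensed sketch of the branch as the trace implies; plain kubectl stands in for the log's kubectl_bin wrapper, and the expose.enabled and NodePort checks are omitted:

# resolve the externally reachable address of a per-pod service
get_service_ip() {
	local service=$1
	local service_type
	service_type=$(kubectl get "service/${service}" -o 'jsonpath={.spec.type}')
	if [ "$service_type" = "ClusterIP" ]; then
		kubectl get "service/${service}" -o 'jsonpath={.spec.clusterIP}'
		return
	fi
	# LoadBalancer: wait until the ingress status carries an ip or hostname
	until kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[]}' \
		| egrep -q 'hostname|ip'; do
		sleep 1
	done
	kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
	kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
}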
+++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OCfb4TPeMy +++ cat /tmp/tmp.PMYPWH1jCn +++ rm /tmp/tmp.OCfb4TPeMy /tmp/tmp.PMYPWH1jCn +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BS5KKxUuL3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bVv24jfkGC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BS5KKxUuL3 +++ cat /tmp/tmp.bVv24jfkGC +++ rm /tmp/tmp.BS5KKxUuL3 /tmp/tmp.bVv24jfkGC +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZMJ1XF8Vm1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jhjDNwRUyC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZMJ1XF8Vm1 ++ cat /tmp/tmp.jhjDNwRUyC ++ rm /tmp/tmp.ZMJ1XF8Vm1 /tmp/tmp.jhjDNwRUyC ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P6d6Psk5E2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gI8wwVBxyb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P6d6Psk5E2 ++ cat /tmp/tmp.gI8wwVBxyb ++ rm /tmp/tmp.P6d6Psk5E2 /tmp/tmp.gI8wwVBxyb ++ return 0 + compare_mongo_cmd find myApp:myPass@34.133.63.68 '' :27017 + local command=find + local uri=myApp:myPass@34.133.63.68 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:21:09+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.133.63.68 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.133.63.68 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AFaO4ZegUx +++ mktemp ++ local LAST_ERR=/tmp/tmp.9f1PoGJyHE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AFaO4ZegUx ++ cat /tmp/tmp.9f1PoGJyHE ++ rm /tmp/tmp.AFaO4ZegUx /tmp/tmp.9f1PoGJyHE ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.133.63.68 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.133.63.68:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.7ClvgFCePP ++ mktemp + local LAST_ERR=/tmp/tmp.DHSVqt2NcQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.133.63.68:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7ClvgFCePP + cat /tmp/tmp.DHSVqt2NcQ + rm /tmp/tmp.7ClvgFCePP /tmp/tmp.DHSVqt2NcQ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hUN1fS2Ene ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OqVOOyCXAH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hUN1fS2Ene +++ cat /tmp/tmp.OqVOOyCXAH +++ rm /tmp/tmp.hUN1fS2Ene /tmp/tmp.OqVOOyCXAH +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nCsqkAlCZS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ThovqTJOZm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nCsqkAlCZS +++ cat /tmp/tmp.ThovqTJOZm +++ rm /tmp/tmp.nCsqkAlCZS /tmp/tmp.ThovqTJOZm +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer 
= NodePort ']' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A2GarjLbLQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.vC23dgtb4V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A2GarjLbLQ ++ cat /tmp/tmp.vC23dgtb4V ++ rm /tmp/tmp.A2GarjLbLQ /tmp/tmp.vC23dgtb4V ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7OKoz6kgMt +++ mktemp ++ local LAST_ERR=/tmp/tmp.lpCw69bAVZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7OKoz6kgMt ++ cat /tmp/tmp.lpCw69bAVZ ++ rm /tmp/tmp.7OKoz6kgMt /tmp/tmp.lpCw69bAVZ ++ return 0 + compare_mongo_cmd find myApp:myPass@35.232.85.49 '' :27017 + local command=find + local uri=myApp:myPass@35.232.85.49 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:21:17+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@35.232.85.49 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@35.232.85.49 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.xicopHYR2d +++ mktemp ++ local LAST_ERR=/tmp/tmp.SekZr60nn2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xicopHYR2d ++ cat /tmp/tmp.SekZr60nn2 ++ rm /tmp/tmp.xicopHYR2d /tmp/tmp.SekZr60nn2 ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@35.232.85.49 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.232.85.49:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GdTH2JMEQ2 ++ mktemp + local LAST_ERR=/tmp/tmp.dPGQLtsBqr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.232.85.49:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GdTH2JMEQ2 + cat /tmp/tmp.dPGQLtsBqr + rm /tmp/tmp.GdTH2JMEQ2 /tmp/tmp.dPGQLtsBqr + return 0 + [[ 0 -eq 0 
]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XqDiFgQ1k1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RWqTmFymcP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XqDiFgQ1k1 +++ cat /tmp/tmp.RWqTmFymcP +++ rm /tmp/tmp.XqDiFgQ1k1 /tmp/tmp.RWqTmFymcP +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2ewFI4rXdW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.L2nZTvXVKJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2ewFI4rXdW +++ cat /tmp/tmp.L2nZTvXVKJ +++ rm /tmp/tmp.2ewFI4rXdW /tmp/tmp.L2nZTvXVKJ +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Kn3MotkkGc +++ mktemp ++ local LAST_ERR=/tmp/tmp.yLGI6SLLUr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Kn3MotkkGc ++ cat /tmp/tmp.yLGI6SLLUr ++ rm /tmp/tmp.Kn3MotkkGc /tmp/tmp.yLGI6SLLUr ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ya4A6fCdIB +++ mktemp ++ local LAST_ERR=/tmp/tmp.zh7hjiQvvI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ya4A6fCdIB ++ cat /tmp/tmp.zh7hjiQvvI ++ rm /tmp/tmp.Ya4A6fCdIB /tmp/tmp.zh7hjiQvvI ++ return 0 + compare_mongo_cmd find myApp:myPass@35.224.188.239 '' :27017 + local command=find + local uri=myApp:myPass@35.224.188.239 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:21:23+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@35.224.188.239 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@35.224.188.239 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gcTMib3TnT +++ mktemp ++ local LAST_ERR=/tmp/tmp.kp7zSEGVwb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gcTMib3TnT ++ cat /tmp/tmp.kp7zSEGVwb ++ rm /tmp/tmp.gcTMib3TnT /tmp/tmp.kp7zSEGVwb ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@35.224.188.239 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.224.188.239:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Uk6LJ08PAk ++ mktemp + local LAST_ERR=/tmp/tmp.bo5SXeeKUc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.224.188.239:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Uk6LJ08PAk + cat /tmp/tmp.bo5SXeeKUc + rm /tmp/tmp.Uk6LJ08PAk /tmp/tmp.bo5SXeeKUc + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find + [[ local-balancer-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'delete PSMDB cluster local-balancer-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.M5bI05KgLY ++ mktemp + local LAST_ERR=/tmp/tmp.nKrtmtW6ha + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M5bI05KgLY perconaservermongodb.psmdb.percona.com "local-balancer" deleted + cat /tmp/tmp.nKrtmtW6ha + rm /tmp/tmp.M5bI05KgLY /tmp/tmp.nKrtmtW6ha + return 0 + desc 'check NodePort' + set +o xtrace ----------------------------------------------------------------------------------- check NodePort ----------------------------------------------------------------------------------- + check_cr_config node-port-rs0 + local cluster=node-port-rs0 + desc 'create PSMDB cluster node-port-rs0' + set +o xtrace 
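-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper (sketch)
-----------------------------------------------------------------------------------
Every kubectl call in this log runs through kubectl_bin, whose trace repeats the same skeleton: two mktemp files for stdout and stderr, up to three attempts with errexit disabled, a break as soon as an attempt succeeds, then cat and rm of both temp files. A sketch reconstructed from that skeleton - the real helper lives in e2e-tests/functions, and details such as what happens between failed attempts (the trace shows a bare sleep 0) may differ:

# run kubectl up to 3 times, keeping stdout/stderr in temp files so they can
# be echoed back whether the command succeeded or not
kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status=0
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		# stop retrying on success; otherwise pause and try again
		[ "$exit_status" -eq 0 ] && break
		sleep 0
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}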
----------------------------------------------------------------------------------- create PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/node-port-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/node-port-rs0.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/node-port-rs0.yml ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_OUT=/tmp/tmp.tKiMVlJ2Ol + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_ERR=/tmp/tmp.bwcDyBdEuJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tKiMVlJ2Ol perconaservermongodb.psmdb.percona.com/node-port created + cat /tmp/tmp.bwcDyBdEuJ + rm /tmp/tmp.tKiMVlJ2Ol /tmp/tmp.bwcDyBdEuJ + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running node-port-rs0 3 false + local name=node-port-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=node-port ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod node-port-rs0-0 + local pod=node-port-rs0-0 + set +o xtrace waiting for pod/node-port-rs0-0 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod node-port-rs0-1 + local pod=node-port-rs0-1 + set +o xtrace waiting for pod/node-port-rs0-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lauQrieYOq +++ mktemp ++ local LAST_ERR=/tmp/tmp.aEiMmrYJjk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lauQrieYOq ++ cat /tmp/tmp.aEiMmrYJjk ++ rm /tmp/tmp.lauQrieYOq /tmp/tmp.aEiMmrYJjk ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod node-port-rs0-2 + local pod=node-port-rs0-2 + set +o xtrace waiting for pod/node-port-rs0-2 to be ready....OK ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A0QufOOcra +++ mktemp ++ local LAST_ERR=/tmp/tmp.xg7HSyGHwx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A0QufOOcra ++ cat 
/tmp/tmp.xg7HSyGHwx ++ rm /tmp/tmp.A0QufOOcra /tmp/tmp.xg7HSyGHwx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/node-port-rs0 + local resource=statefulset/node-port-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml + local new_result=/tmp/tmp.XUCiRCZY5g/statefulset_node-port-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/node-port-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.2jeMctFhIB ++ mktemp + local LAST_ERR=/tmp/tmp.fdHxSvYM2W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/node-port-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2jeMctFhIB + cat /tmp/tmp.fdHxSvYM2W + rm /tmp/tmp.2jeMctFhIB /tmp/tmp.fdHxSvYM2W + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/statefulset_node-port-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/statefulset_node-port-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/statefulset_node-port-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml /tmp/tmp.XUCiRCZY5g/statefulset_node-port-rs0.yml + compare_kubectl service/node-port-rs0-0 + local resource=service/node-port-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml + local new_result=/tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.XaQu0YdQPe ++ mktemp + local LAST_ERR=/tmp/tmp.442bAa0BhS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XaQu0YdQPe + cat /tmp/tmp.442bAa0BhS + rm /tmp/tmp.XaQu0YdQPe /tmp/tmp.442bAa0BhS + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zvGrzghjSy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OXl9Ac9Pfd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zvGrzghjSy +++ cat /tmp/tmp.OXl9Ac9Pfd +++ rm /tmp/tmp.zvGrzghjSy /tmp/tmp.OXl9Ac9Pfd +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LwLAZvzXVI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MmQVGouPWn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LwLAZvzXVI +++ cat /tmp/tmp.MmQVGouPWn +++ rm /tmp/tmp.LwLAZvzXVI /tmp/tmp.MmQVGouPWn +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3jESKkdG58 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tY6lHftYGf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3jESKkdG58 ++ cat /tmp/tmp.tY6lHftYGf ++ rm /tmp/tmp.3jESKkdG58 /tmp/tmp.tY6lHftYGf ++ return 0 ++ return ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' 
++++ mktemp +++ local LAST_OUT=/tmp/tmp.ithi8mbYzl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CiA0KyKrYc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ithi8mbYzl +++ cat /tmp/tmp.CiA0KyKrYc +++ rm /tmp/tmp.ithi8mbYzl /tmp/tmp.CiA0KyKrYc +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XQRfpWzFyY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UNyxEHr606 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XQRfpWzFyY +++ cat /tmp/tmp.UNyxEHr606 +++ rm /tmp/tmp.XQRfpWzFyY /tmp/tmp.UNyxEHr606 +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CWmJvwxOu7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.X70XvNQUZH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CWmJvwxOu7 ++ cat /tmp/tmp.X70XvNQUZH ++ rm /tmp/tmp.CWmJvwxOu7 /tmp/tmp.X70XvNQUZH ++ return 0 ++ return ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s77GC44deI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Rv5dynrhpH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s77GC44deI +++ cat /tmp/tmp.Rv5dynrhpH +++ rm /tmp/tmp.s77GC44deI /tmp/tmp.Rv5dynrhpH +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LRGLW3je3c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ALEi4abO9C +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LRGLW3je3c +++ cat /tmp/tmp.ALEi4abO9C +++ rm /tmp/tmp.LRGLW3je3c /tmp/tmp.ALEi4abO9C +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hhEh5D6m9C +++ mktemp ++ local LAST_ERR=/tmp/tmp.wZpPVdVK1U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hhEh5D6m9C ++ cat /tmp/tmp.wZpPVdVK1U ++ rm /tmp/tmp.hhEh5D6m9C 
/tmp/tmp.wZpPVdVK1U ++ return 0 ++ return + local URI=34.118.235.59,34.118.227.25,34.118.231.206 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.235.59,34.118.227.25,34.118.231.206 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.235.59,34.118.227.25,34.118.231.206 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y3AylRVKkB +++ mktemp ++ local LAST_ERR=/tmp/tmp.2cMm8obSFL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y3AylRVKkB ++ cat /tmp/tmp.2cMm8obSFL ++ rm /tmp/tmp.Y3AylRVKkB /tmp/tmp.2cMm8obSFL ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ userAdmin:userAdmin123456@34.118.235.59,34.118.227.25,34.118.231.206 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.235.59,34.118.227.25,34.118.231.206:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.m3TWLlAdew ++ mktemp + local LAST_ERR=/tmp/tmp.wk8XO6D7dF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.235.59,34.118.227.25,34.118.231.206:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m3TWLlAdew Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.235.59:27017,34.118.227.25:27017,34.118.231.206:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("71a4217f-2ba4-4633-bda4-dcf88630b229") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.wk8XO6D7dF + rm /tmp/tmp.m3TWLlAdew /tmp/tmp.wk8XO6D7dF + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.235.59,34.118.227.25,34.118.231.206 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.235.59,34.118.227.25,34.118.231.206 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.RhBNKoJA4L +++ mktemp ++ local LAST_ERR=/tmp/tmp.xTPAIJMpCt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RhBNKoJA4L ++ cat /tmp/tmp.xTPAIJMpCt ++ rm /tmp/tmp.RhBNKoJA4L /tmp/tmp.xTPAIJMpCt ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.235.59,34.118.227.25,34.118.231.206 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.235.59,34.118.227.25,34.118.231.206:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Y8C85i1L2U ++ mktemp + local LAST_ERR=/tmp/tmp.fU8isqpyxh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.235.59,34.118.227.25,34.118.231.206:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y8C85i1L2U Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.235.59:27017,34.118.227.25:27017,34.118.231.206:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("cba11f87-2284-4176-ad40-3fc187a96ccf") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.fU8isqpyxh + rm /tmp/tmp.Y8C85i1L2U /tmp/tmp.fU8isqpyxh + return 0 + sleep 30 ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1dEY4cvt8V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xHrpxNmEB4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1dEY4cvt8V +++ cat /tmp/tmp.xHrpxNmEB4 +++ rm /tmp/tmp.1dEY4cvt8V /tmp/tmp.xHrpxNmEB4 +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rJI1uqTUo1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VbmyLUNw1B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rJI1uqTUo1 +++ cat /tmp/tmp.VbmyLUNw1B +++ rm /tmp/tmp.rJI1uqTUo1 /tmp/tmp.VbmyLUNw1B +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oVB8ZVb80T +++ mktemp ++ local LAST_ERR=/tmp/tmp.sHpNERANMi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-0 -o 
'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oVB8ZVb80T ++ cat /tmp/tmp.sHpNERANMi ++ rm /tmp/tmp.oVB8ZVb80T /tmp/tmp.sHpNERANMi ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.235.59 '' :27017 + local command=find + local uri=myApp:myPass@34.118.235.59 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:24:20+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.235.59 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.235.59 + local driver=mongodb + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9EHO3HGmrM +++ mktemp ++ local LAST_ERR=/tmp/tmp.C7A9m8Qy5l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9EHO3HGmrM ++ cat /tmp/tmp.C7A9m8Qy5l ++ rm /tmp/tmp.9EHO3HGmrM /tmp/tmp.C7A9m8Qy5l ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.235.59 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.235.59:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.upar8odocI ++ mktemp + local LAST_ERR=/tmp/tmp.cPWo1aGWBX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.235.59:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.upar8odocI + cat /tmp/tmp.cPWo1aGWBX + rm /tmp/tmp.upar8odocI /tmp/tmp.cPWo1aGWBX + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qg9ekN8EBl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CpGmMDd4Ja +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qg9ekN8EBl +++ cat /tmp/tmp.CpGmMDd4Ja +++ rm /tmp/tmp.qg9ekN8EBl /tmp/tmp.CpGmMDd4Ja +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WHEYtTRMqx ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.ZWFoZQ5W0X +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WHEYtTRMqx +++ cat /tmp/tmp.ZWFoZQ5W0X +++ rm /tmp/tmp.WHEYtTRMqx /tmp/tmp.ZWFoZQ5W0X +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7Y7vN4KeI9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DuuHXcbt0u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7Y7vN4KeI9 ++ cat /tmp/tmp.DuuHXcbt0u ++ rm /tmp/tmp.7Y7vN4KeI9 /tmp/tmp.DuuHXcbt0u ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.227.25 '' :27017 + local command=find + local uri=myApp:myPass@34.118.227.25 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:24:25+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.227.25 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.227.25 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.agyLmYH8m9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QwBLu61mKu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.agyLmYH8m9 ++ cat /tmp/tmp.QwBLu61mKu ++ rm /tmp/tmp.agyLmYH8m9 /tmp/tmp.QwBLu61mKu ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.227.25 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.227.25:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6dGytBBqNZ ++ mktemp + local LAST_ERR=/tmp/tmp.uqJqlVFO54 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.227.25:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6dGytBBqNZ + cat /tmp/tmp.uqJqlVFO54 + rm /tmp/tmp.6dGytBBqNZ /tmp/tmp.uqJqlVFO54 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 
'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HpuKK924XJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Yy4zwMvW3K +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HpuKK924XJ +++ cat /tmp/tmp.Yy4zwMvW3K +++ rm /tmp/tmp.HpuKK924XJ /tmp/tmp.Yy4zwMvW3K +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vMjQx3QEJz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dxoSyJfLxC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vMjQx3QEJz +++ cat /tmp/tmp.dxoSyJfLxC +++ rm /tmp/tmp.vMjQx3QEJz /tmp/tmp.dxoSyJfLxC +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hk6I8WLcnK +++ mktemp ++ local LAST_ERR=/tmp/tmp.cLcpLLUgiq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hk6I8WLcnK ++ cat /tmp/tmp.cLcpLLUgiq ++ rm /tmp/tmp.Hk6I8WLcnK /tmp/tmp.cLcpLLUgiq ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.231.206 '' :27017 + local command=find + local uri=myApp:myPass@34.118.231.206 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-21T13:24:31+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.231.206 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.231.206 + local driver=mongodb + local suffix=:27017 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2SE2th1JSS +++ mktemp ++ local LAST_ERR=/tmp/tmp.yMoUqptgKg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2SE2th1JSS ++ cat /tmp/tmp.yMoUqptgKg ++ rm /tmp/tmp.2SE2th1JSS /tmp/tmp.yMoUqptgKg ++ return 0 + local client_container=psmdb-client-66f577db5f-46qbn + local mongo_flag= + [[ myApp:myPass@34.118.231.206 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.231.206:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.T4nJtJPZMS ++ mktemp + local LAST_ERR=/tmp/tmp.dVjKzUhuWn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-46qbn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.231.206:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T4nJtJPZMS + cat /tmp/tmp.dVjKzUhuWn + rm /tmp/tmp.T4nJtJPZMS /tmp/tmp.dVjKzUhuWn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.XUCiRCZY5g/find + [[ node-port-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'add service-per-pod label and annotation' + set +o xtrace ----------------------------------------------------------------------------------- add service-per-pod label and annotation ----------------------------------------------------------------------------------- ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MqVINhS2CG +++ mktemp ++ local LAST_ERR=/tmp/tmp.OzGUNjJzpF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MqVINhS2CG ++ cat /tmp/tmp.OzGUNjJzpF ++ rm /tmp/tmp.MqVINhS2CG /tmp/tmp.OzGUNjJzpF ++ return 0 + old_node_port=30835 + kubectl_bin patch psmdb node-port --type=json --patch '[ { "op": "add", "path": "/spec/replsets/0/expose/annotations", "value": { "test": "service-per-pod", } }, { "op": "add", "path": "/spec/replsets/0/expose/labels", "value": { "test": "service-per-pod", } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.KYf9vlyMZK ++ mktemp + local LAST_ERR=/tmp/tmp.WYWGJL7Q38 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb node-port --type=json --patch '[ { "op": "add", 
"path": "/spec/replsets/0/expose/annotations", "value": { "test": "service-per-pod", } }, { "op": "add", "path": "/spec/replsets/0/expose/labels", "value": { "test": "service-per-pod", } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KYf9vlyMZK perconaservermongodb.psmdb.percona.com/node-port patched + cat /tmp/tmp.WYWGJL7Q38 + rm /tmp/tmp.KYf9vlyMZK /tmp/tmp.WYWGJL7Q38 + return 0 + sleep 5 + desc 'check if service created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/node-port-rs0-0 -updated + local resource=service/node-port-rs0-0 + local postfix=-updated + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml + local new_result=/tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-15836", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vaNivmC1LM ++ mktemp + local LAST_ERR=/tmp/tmp.njQpWIOIzh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vaNivmC1LM + cat /tmp/tmp.njQpWIOIzh + rm /tmp/tmp.vaNivmC1LM /tmp/tmp.njQpWIOIzh + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml /tmp/tmp.XUCiRCZY5g/service_node-port-rs0-0.yml ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GLF9DKK7iB +++ mktemp ++ local LAST_ERR=/tmp/tmp.jhDGx51tSK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GLF9DKK7iB ++ cat /tmp/tmp.jhDGx51tSK ++ rm /tmp/tmp.GLF9DKK7iB /tmp/tmp.jhDGx51tSK ++ return 0 + current_node_port=30835 + [[ 30835 != \3\0\8\3\5 ]] + desc 'delete PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/node-port-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WfOSYFrAOK ++ mktemp + local LAST_ERR=/tmp/tmp.20CgsK5TMr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/node-port-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WfOSYFrAOK perconaservermongodb.psmdb.percona.com "node-port" deleted + cat /tmp/tmp.20CgsK5TMr + rm /tmp/tmp.WfOSYFrAOK /tmp/tmp.20CgsK5TMr + return 0 + desc 'check Mongos in sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- check Mongos in sharded cluster ----------------------------------------------------------------------------------- + local cluster=some-name + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/e2e-tests/service-per-pod/conf/sharded.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1912-ab1be45a"' + yq 
eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.3LFDATAqvi ++ mktemp + local LAST_ERR=/tmp/tmp.jgkLSQnbhH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3LFDATAqvi perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.jgkLSQnbhH + rm /tmp/tmp.3LFDATAqvi /tmp/tmp.jgkLSQnbhH + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QiWUsaFOnU +++ mktemp ++ local LAST_ERR=/tmp/tmp.L2RzaFa2N8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QiWUsaFOnU ++ cat /tmp/tmp.L2RzaFa2N8 ++ rm /tmp/tmp.QiWUsaFOnU /tmp/tmp.L2RzaFa2N8 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nJ5fTnQT9b +++ mktemp ++ local LAST_ERR=/tmp/tmp.eWGDl49iW9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nJ5fTnQT9b ++ cat /tmp/tmp.eWGDl49iW9 ++ rm /tmp/tmp.nJ5fTnQT9b /tmp/tmp.eWGDl49iW9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................... 
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7cIud1CMEX +++ mktemp ++ local LAST_ERR=/tmp/tmp.zsIMFOptn0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7cIud1CMEX ++ cat /tmp/tmp.zsIMFOptn0 ++ rm /tmp/tmp.7cIud1CMEX /tmp/tmp.zsIMFOptn0 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FVXnzuWKDK +++ mktemp ++ local LAST_ERR=/tmp/tmp.LP43EVZA8j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FVXnzuWKDK ++ cat /tmp/tmp.LP43EVZA8j ++ rm /tmp/tmp.FVXnzuWKDK /tmp/tmp.LP43EVZA8j ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B5dMwhqpcL +++ mktemp ++ local LAST_ERR=/tmp/tmp.V5K8dNgC6B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B5dMwhqpcL ++ cat /tmp/tmp.V5K8dNgC6B ++ rm /tmp/tmp.B5dMwhqpcL /tmp/tmp.V5K8dNgC6B ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5pHmYjbPl8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YhT3JFYwrX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5pHmYjbPl8 ++ cat /tmp/tmp.YhT3JFYwrX ++ rm /tmp/tmp.5pHmYjbPl8 /tmp/tmp.YhT3JFYwrX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'enabling servicePerPod for mongos' + set +o xtrace ----------------------------------------------------------------------------------- enabling servicePerPod for mongos ----------------------------------------------------------------------------------- + kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mu068BtSje +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hpvd8JkG8V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mu068BtSje ++ cat /tmp/tmp.Hpvd8JkG8V ++ rm /tmp/tmp.mu068BtSje /tmp/tmp.Hpvd8JkG8V ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lBhokBY2v5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uSsuUxxO6R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lBhokBY2v5 ++ cat /tmp/tmp.uSsuUxxO6R ++ rm /tmp/tmp.lBhokBY2v5 /tmp/tmp.uSsuUxxO6R ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + check_service present some-name-mongos-0 + state=present + svc_name=some-name-mongos-0 + '[' present = present ']' + echo -n 'check that some-name-mongos-0 was created' check that some-name-mongos-0 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-0 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service present some-name-mongos-1 + state=present + svc_name=some-name-mongos-1 + '[' present = present ']' + echo -n 'check that some-name-mongos-1 was created' check that some-name-mongos-1 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-1 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service present some-name-mongos-2 
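check_service, expanding here, is a thin presence probe: it queries the service's .spec.type and infers existence from whether kubectl returns a NotFound error. The same two checks, reduced to plain kubectl (service names as in this test):

    # present: the query prints a type such as NodePort, so no NotFound in the output
    kubectl get service/some-name-mongos-0 -o jsonpath='{.spec.type}' 2>&1 | grep -vq NotFound \
      && echo 'some-name-mongos-0 exists'
    # removed: servicePerPod drops the shared mongos service, so the query yields NotFound
    kubectl get service/some-name-mongos -o jsonpath='{.spec.type}' 2>&1 | grep -q NotFound \
      && echo 'some-name-mongos removed'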
+ state=present + svc_name=some-name-mongos-2 + '[' present = present ']' + echo -n 'check that some-name-mongos-2 was created' check that some-name-mongos-2 was created+ local timeout=0 + grep -vq NotFound + kubectl_bin get service/some-name-mongos-2 -o 'jsonpath={.spec.type}' + echo .OK .OK + check_service removed some-name-mongos + state=removed + svc_name=some-name-mongos + '[' removed = present ']' + '[' removed = removed ']' + echo -n 'check that some-name-mongos was removed' check that some-name-mongos was removed++ kubectl_bin get service/some-name-mongos -o 'jsonpath={.spec.type}' ++ grep NotFound + [[ -z Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found ]] + echo .OK .OK + destroy service-per-pod-15836 + local namespace=service-per-pod-15836 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ wc -l ++ kubectl_bin get psmdb-backup --no-headers +++ mktemp ++ local LAST_OUT=/tmp/tmp.uj1A03ISN8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1nTCjMVrVW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uj1A03ISN8 ++ cat /tmp/tmp.1nTCjMVrVW No resources found in service-per-pod-15836 namespace. 
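The destroy teardown that follows clears finalizers on any leftover custom resources before deleting their CRDs; otherwise a stuck finalizer would make the CRD deletion hang. The pattern delete_crd uses below, shown for the backups type (restores and the psmdb type itself are handled the same way):

    # list instances in every namespace, blank their finalizers, tolerate 'no such type'
    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
      | grep -v NAMESPACE \
      | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' || :
    kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com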
++ rm /tmp/tmp.uj1A03ISN8 /tmp/tmp.1nTCjMVrVW ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.6lswSxN6Hy ++ mktemp + local LAST_ERR=/tmp/tmp.Zk7xKAMVS8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6lswSxN6Hy customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Zk7xKAMVS8 + rm /tmp/tmp.6lswSxN6Hy /tmp/tmp.Zk7xKAMVS8 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Vf4eJqcD2R ++ mktemp + local LAST_ERR=/tmp/tmp.m2X6btsU2a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Vf4eJqcD2R + cat /tmp/tmp.m2X6btsU2a + rm /tmp/tmp.Vf4eJqcD2R /tmp/tmp.m2X6btsU2a + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.q3UPbxzsML ++ mktemp + local LAST_ERR=/tmp/tmp.mR2O0kXmny + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q3UPbxzsML 
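Every kubectl_bin expansion in this log, including the CRD wait just above, follows one retry scaffold: capture stdout and stderr into mktemp files, attempt the command up to three times, print both captures, and remove the temp files. Reduced to its core (a simplified sketch; the real helper also tracks a timeout and its own sleep schedule between attempts):

    kubectl_bin() {
      local out err status=1
      out=$(mktemp); err=$(mktemp)
      for i in $(seq 0 2); do
        if kubectl "$@" >"$out" 2>"$err"; then
          status=0
          break
        fi
        sleep "$i"
      done
      cat "$out"; cat "$err" >&2
      rm -f "$out" "$err"
      return $status
    }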
+ cat /tmp/tmp.mR2O0kXmny + rm /tmp/tmp.q3UPbxzsML /tmp/tmp.mR2O0kXmny + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0mUepwhD9n ++ mktemp + local LAST_ERR=/tmp/tmp.C0ByIyRA4r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0mUepwhD9n + cat /tmp/tmp.C0ByIyRA4r + rm /tmp/tmp.0mUepwhD9n /tmp/tmp.C0ByIyRA4r + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.rXi1MueG6w ++ mktemp + local LAST_ERR=/tmp/tmp.aRVnNCEHw3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rXi1MueG6w clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.aRVnNCEHw3 + rm /tmp/tmp.rXi1MueG6w /tmp/tmp.aRVnNCEHw3 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.7rvTUcwVBY ++ mktemp + local LAST_ERR=/tmp/tmp.TiJC0fNqq6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.7rvTUcwVBY namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted 
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.rXi1MueG6w
++ mktemp
+ local LAST_ERR=/tmp/tmp.aRVnNCEHw3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1912/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rXi1MueG6w
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.aRVnNCEHw3
+ rm /tmp/tmp.rXi1MueG6w /tmp/tmp.aRVnNCEHw3
+ return 0
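[Editor's note: the delete calls above pass --ignore-not-found, which makes the cleanup idempotent: a missing object becomes a no-op instead of a NotFound error. The CRD deletion earlier also adds --wait=false, so the call returns once deletion is requested rather than blocking on finalizers. A short illustration with a resource name taken from this log:]

# Fails with "Error from server (NotFound)" if the object is already gone:
kubectl delete clusterrole percona-server-mongodb-operator

# Exits 0 whether or not the object exists -- safe to re-run in teardown:
kubectl delete clusterrole percona-server-mongodb-operator --ignore-not-found

# Additionally, return as soon as deletion is requested, without waiting
# for finalizers to be processed (as done for the CRDs above):
kubectl delete -f deploy/crd.yaml --ignore-not-found --wait=false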
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.7rvTUcwVBY namespace "cert-manager" deleted + cat /tmp/tmp.TiJC0fNqq6 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.7rvTUcwVBY
+ cat /tmp/tmp.TiJC0fNqq6
[... NotFound errors identical to the previous attempt's list, now preceded by namespaces "cert-manager" not found -- verbatim repeat omitted ...]
+ sleep 8
+ cat /tmp/tmp.7rvTUcwVBY
+ cat /tmp/tmp.TiJC0fNqq6
[... the same NotFound errors repeated verbatim -- omitted ...]
+ rm /tmp/tmp.7rvTUcwVBY /tmp/tmp.TiJC0fNqq6
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace service-per-pod-15836
+ rm -rf /tmp/tmp.XUCiRCZY5g
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
+ local LAST_OUT=/tmp/tmp.L4qSyQqOMu
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.BkVNbgHWC0
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.omJJPThoys
+ local LAST_ERR=/tmp/tmp.HN6JVFF9FR
+ local exit_status=0
+ local exit_status=0
+ local timeout=4
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace service-per-pod-15836
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
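[Editor's note: the two namespace deletions at the end run concurrently, which is why their xtrace lines interleave above (including the "test passed" banner split across the two streams). A sketch of that final cleanup, assuming the kubectl_bin wrapper reconstructed earlier; the backgrounding and wait are inferred from the interleaving, not shown explicitly in the trace.]

# Force-delete the test and operator namespaces in parallel.
# --grace-period=0 --force skips the graceful-termination wait; the API
# server still finalizes the namespaces' contents in the background.
kubectl_bin delete --grace-period=0 --force=true namespace service-per-pod-15836 &
kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator &
wait   # block until both background deletions finish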