Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/logs/service-per-pod.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + main + create_infra service-per-pod-6273 + local ns=service-per-pod-6273 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.q2AgWwk69H ++ mktemp + local LAST_ERR=/tmp/tmp.S8MLa1D7Hx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q2AgWwk69H customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.S8MLa1D7Hx + rm /tmp/tmp.q2AgWwk69H /tmp/tmp.S8MLa1D7Hx + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.2hW72YGXVu ++ mktemp + local LAST_ERR=/tmp/tmp.mk97Ds0PT6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2hW72YGXVu + cat /tmp/tmp.mk97Ds0PT6 + rm /tmp/tmp.2hW72YGXVu /tmp/tmp.mk97Ds0PT6 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JJ72FP3l7l ++ mktemp + local LAST_ERR=/tmp/tmp.HvKgnKBcxm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JJ72FP3l7l + cat /tmp/tmp.HvKgnKBcxm + rm /tmp/tmp.JJ72FP3l7l /tmp/tmp.HvKgnKBcxm + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.bCRsCOA6Jg ++ mktemp + local LAST_ERR=/tmp/tmp.xXhzRoaJdo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bCRsCOA6Jg + cat /tmp/tmp.xXhzRoaJdo + rm /tmp/tmp.bCRsCOA6Jg /tmp/tmp.xXhzRoaJdo + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.5zvqXAyirL ++ mktemp + local LAST_ERR=/tmp/tmp.qYu22yJaJW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5zvqXAyirL clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.qYu22yJaJW + rm /tmp/tmp.5zvqXAyirL /tmp/tmp.qYu22yJaJW + return 0 + check_crd_for_deletion PR-1897-ea494d1c + local git_tag=PR-1897-ea494d1c ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1897-ea494d1c/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qt7fbPLXUu +++ mktemp ++ local LAST_ERR=/tmp/tmp.LFIEybBKqq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.Qt7fbPLXUu ++ cat /tmp/tmp.LFIEybBKqq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.Qt7fbPLXUu ++ cat /tmp/tmp.LFIEybBKqq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.Qt7fbPLXUu ++ cat /tmp/tmp.LFIEybBKqq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.Qt7fbPLXUu ++ cat /tmp/tmp.LFIEybBKqq Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.Qt7fbPLXUu /tmp/tmp.LFIEybBKqq ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + awk '{print$1}' + xargs kubectl delete ns ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + local LAST_OUT=/tmp/tmp.ZUkauyEJY8 + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.26uWrKh2h8 + local LAST_ERR=/tmp/tmp.sdYFoQF5wS + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + 
for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + local LAST_ERR=/tmp/tmp.q3IRlq2kIu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.26uWrKh2h8 + cat /tmp/tmp.q3IRlq2kIu + rm /tmp/tmp.26uWrKh2h8 /tmp/tmp.q3IRlq2kIu + return 0 namespace "cert-manager" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "service-per-pod-19575" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZUkauyEJY8 namespace "psmdb-operator" deleted + cat /tmp/tmp.sdYFoQF5wS + rm /tmp/tmp.ZUkauyEJY8 /tmp/tmp.sdYFoQF5wS + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.1JqPc6Eb29 ++ mktemp + local LAST_ERR=/tmp/tmp.lHxA1OD58K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1JqPc6Eb29 + cat /tmp/tmp.lHxA1OD58K + rm /tmp/tmp.1JqPc6Eb29 /tmp/tmp.lHxA1OD58K + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.7t1SlB1ouK ++ mktemp + local LAST_ERR=/tmp/tmp.t2un2zKgrF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7t1SlB1ouK namespace/psmdb-operator created + cat /tmp/tmp.t2un2zKgrF + rm /tmp/tmp.7t1SlB1ouK /tmp/tmp.t2un2zKgrF + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.zq2gtXlEgz +++ mktemp ++ local LAST_ERR=/tmp/tmp.ijSUYUyBam ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zq2gtXlEgz ++ cat /tmp/tmp.ijSUYUyBam ++ rm /tmp/tmp.zq2gtXlEgz /tmp/tmp.ijSUYUyBam ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster5 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.m2Hr6kONbc ++ mktemp + local LAST_ERR=/tmp/tmp.isNoy1G9fg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster5 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m2Hr6kONbc Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster5" modified. 
+ cat /tmp/tmp.isNoy1G9fg + rm /tmp/tmp.m2Hr6kONbc /tmp/tmp.isNoy1G9fg + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Ne1GAH1kjc ++ mktemp + local LAST_ERR=/tmp/tmp.IQpVd4dO5G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ne1GAH1kjc customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.IQpVd4dO5G + rm /tmp/tmp.Ne1GAH1kjc /tmp/tmp.IQpVd4dO5G + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ngHWLB7zWi ++ mktemp + local LAST_ERR=/tmp/tmp.pbGQstIwct + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ngHWLB7zWi clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.pbGQstIwct + rm /tmp/tmp.ngHWLB7zWi /tmp/tmp.pbGQstIwct + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2VSWkUsX5O ++ mktemp + local LAST_ERR=/tmp/tmp.7FdGHFKAn2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2VSWkUsX5O deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.7FdGHFKAn2 + rm /tmp/tmp.2VSWkUsX5O /tmp/tmp.7FdGHFKAn2 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.nEaWDQsQLx +++ mktemp ++ local LAST_ERR=/tmp/tmp.nF9UYWO0vP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nEaWDQsQLx ++ cat /tmp/tmp.nF9UYWO0vP ++ rm /tmp/tmp.nEaWDQsQLx /tmp/tmp.nF9UYWO0vP ++ return 0 + wait_pod percona-server-mongodb-operator-564698798-bbfdq + local pod=percona-server-mongodb-operator-564698798-bbfdq + set +o xtrace waiting for pod/percona-server-mongodb-operator-564698798-bbfdq to be ready.OK + create_namespace service-per-pod-6273 + local namespace=service-per-pod-6273 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrolebinding ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces service-per-pod-6273' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces service-per-pod-6273 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace service-per-pod-6273 --ignore-not-found + awk '{print$1}' ++ mktemp + xargs kubectl delete ns + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + local LAST_OUT=/tmp/tmp.rPKDbKpZuq ++ mktemp + local LAST_OUT=/tmp/tmp.1a7dFAwjFa ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.DeoGyq6Lic + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.bv5Lxj0dRZ + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace service-per-pod-6273 --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rPKDbKpZuq + cat /tmp/tmp.DeoGyq6Lic + rm /tmp/tmp.rPKDbKpZuq /tmp/tmp.DeoGyq6Lic + return 0 + kubectl_bin wait --for=delete namespace service-per-pod-6273 ++ mktemp + local LAST_OUT=/tmp/tmp.1s9oLYol1M ++ mktemp + local LAST_ERR=/tmp/tmp.l7HZt7uxTU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace service-per-pod-6273 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1a7dFAwjFa + cat /tmp/tmp.bv5Lxj0dRZ + rm /tmp/tmp.1a7dFAwjFa /tmp/tmp.bv5Lxj0dRZ + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1s9oLYol1M + cat /tmp/tmp.l7HZt7uxTU + rm /tmp/tmp.1s9oLYol1M /tmp/tmp.l7HZt7uxTU + return 0 + desc 'create namespace service-per-pod-6273' + set +o xtrace ----------------------------------------------------------------------------------- create namespace service-per-pod-6273 ----------------------------------------------------------------------------------- + kubectl_bin create namespace service-per-pod-6273 ++ mktemp + local LAST_OUT=/tmp/tmp.5A7legFDji ++ mktemp + local LAST_ERR=/tmp/tmp.zFUdQrprOg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace service-per-pod-6273 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5A7legFDji namespace/service-per-pod-6273 created + cat /tmp/tmp.zFUdQrprOg + rm /tmp/tmp.5A7legFDji /tmp/tmp.zFUdQrprOg + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.qmXpM4x6Ro +++ mktemp ++ local LAST_ERR=/tmp/tmp.z2poCMy9b2 ++ local exit_status=0 ++ local timeout=4 namespace "gmp-public" deleted +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qmXpM4x6Ro ++ cat /tmp/tmp.z2poCMy9b2 ++ rm /tmp/tmp.qmXpM4x6Ro /tmp/tmp.z2poCMy9b2 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster5 --namespace=service-per-pod-6273 ++ mktemp namespace "gmp-system" deleted + local LAST_OUT=/tmp/tmp.NgpGOnoaaU ++ mktemp + local LAST_ERR=/tmp/tmp.7AbNhp3eaU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster5 --namespace=service-per-pod-6273 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NgpGOnoaaU Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1897-ea494d1c-6-cluster5" modified. + cat /tmp/tmp.7AbNhp3eaU + rm /tmp/tmp.NgpGOnoaaU /tmp/tmp.7AbNhp3eaU + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.YZkn4EPEHx ++ mktemp + local LAST_ERR=/tmp/tmp.TkYmCwGHDG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YZkn4EPEHx namespace/cert-manager created + cat /tmp/tmp.TkYmCwGHDG + rm /tmp/tmp.YZkn4EPEHx /tmp/tmp.TkYmCwGHDG + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.3F0KjmdvgY ++ mktemp + local LAST_ERR=/tmp/tmp.JkScOJczvS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3F0KjmdvgY namespace/cert-manager labeled + cat /tmp/tmp.JkScOJczvS + rm /tmp/tmp.3F0KjmdvgY /tmp/tmp.JkScOJczvS + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.kewBTqdNhv ++ mktemp + local LAST_ERR=/tmp/tmp.9WlJ0da4Oa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kewBTqdNhv namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.9WlJ0da4Oa Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
+ rm /tmp/tmp.kewBTqdNhv /tmp/tmp.9WlJ0da4Oa + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.7BclU9prBK ++ mktemp + local LAST_ERR=/tmp/tmp.RMl1eQ3ZYB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7BclU9prBK pod/cert-manager-6c7fdcbcd5-nt79c condition met pod/cert-manager-cainjector-64d77f8498-wncc9 condition met pod/cert-manager-webhook-68796f6795-dr8sh condition met + cat /tmp/tmp.RMl1eQ3ZYB + rm /tmp/tmp.7BclU9prBK /tmp/tmp.RMl1eQ3ZYB + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qV7wYdXVeS ++ mktemp + local LAST_ERR=/tmp/tmp.AxzS5B3aIg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qV7wYdXVeS deployment.apps/psmdb-client created secret/some-users created + cat /tmp/tmp.AxzS5B3aIg + rm /tmp/tmp.qV7wYdXVeS /tmp/tmp.AxzS5B3aIg + return 0 + desc 'check ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- check ClusterIP ----------------------------------------------------------------------------------- + check_cr_config cluster-ip-rs0 + local cluster=cluster-ip-rs0 + desc 'create PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.iZoJBbclzx ++ mktemp + local LAST_ERR=/tmp/tmp.ZRT0llTXUk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iZoJBbclzx 
perconaservermongodb.psmdb.percona.com/cluster-ip created + cat /tmp/tmp.ZRT0llTXUk + rm /tmp/tmp.iZoJBbclzx /tmp/tmp.ZRT0llTXUk + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cluster-ip-rs0 3 false + local name=cluster-ip-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cluster-ip ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod cluster-ip-rs0-0 + local pod=cluster-ip-rs0-0 + set +o xtrace waiting for pod/cluster-ip-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod cluster-ip-rs0-1 + local pod=cluster-ip-rs0-1 + set +o xtrace waiting for pod/cluster-ip-rs0-1 to be ready.......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5RSEj9o8d7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.g64gGAa54o ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5RSEj9o8d7 ++ cat /tmp/tmp.g64gGAa54o ++ rm /tmp/tmp.5RSEj9o8d7 /tmp/tmp.g64gGAa54o ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod cluster-ip-rs0-2 + local pod=cluster-ip-rs0-2 + set +o xtrace waiting for pod/cluster-ip-rs0-2 to be ready......OK ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y0Xmt4pTth +++ mktemp ++ local LAST_ERR=/tmp/tmp.UJbg5YqFS9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y0Xmt4pTth ++ cat /tmp/tmp.UJbg5YqFS9 ++ rm /tmp/tmp.y0Xmt4pTth /tmp/tmp.UJbg5YqFS9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/cluster-ip-rs0 + local resource=statefulset/cluster-ip-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml + local new_result=/tmp/tmp.TZvRRBkAdo/statefulset_cluster-ip-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/cluster-ip-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.SDUGkpd5kF ++ mktemp + local LAST_ERR=/tmp/tmp.psMV5BBwCI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/cluster-ip-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SDUGkpd5kF + cat /tmp/tmp.psMV5BBwCI + rm /tmp/tmp.SDUGkpd5kF /tmp/tmp.psMV5BBwCI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/statefulset_cluster-ip-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/statefulset_cluster-ip-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/statefulset_cluster-ip-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml /tmp/tmp.TZvRRBkAdo/statefulset_cluster-ip-rs0.yml + compare_kubectl service/cluster-ip-rs0-0 + local resource=service/cluster-ip-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml + local new_result=/tmp/tmp.TZvRRBkAdo/service_cluster-ip-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/cluster-ip-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.xRtM3an4Lz ++ mktemp + local LAST_ERR=/tmp/tmp.UgPUs3Unbx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/cluster-ip-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xRtM3an4Lz + cat /tmp/tmp.UgPUs3Unbx + rm /tmp/tmp.xRtM3an4Lz /tmp/tmp.UgPUs3Unbx + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/service_cluster-ip-rs0-0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/service_cluster-ip-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/service_cluster-ip-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml /tmp/tmp.TZvRRBkAdo/service_cluster-ip-rs0-0.yml ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3E723OZf1w ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PY3VEmnPR5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3E723OZf1w +++ cat /tmp/tmp.PY3VEmnPR5 +++ rm /tmp/tmp.3E723OZf1w /tmp/tmp.PY3VEmnPR5 +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Rvx8pplab0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0gyuQXibMn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Rvx8pplab0 +++ cat /tmp/tmp.0gyuQXibMn +++ rm /tmp/tmp.Rvx8pplab0 /tmp/tmp.0gyuQXibMn +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XLbyf9mJyY +++ mktemp ++ local LAST_ERR=/tmp/tmp.zxiYMpyduY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XLbyf9mJyY ++ cat /tmp/tmp.zxiYMpyduY ++ rm /tmp/tmp.XLbyf9mJyY /tmp/tmp.zxiYMpyduY ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get 
psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CUIL73EFnv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EQZ7LtuJqX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CUIL73EFnv +++ cat /tmp/tmp.EQZ7LtuJqX +++ rm /tmp/tmp.CUIL73EFnv /tmp/tmp.EQZ7LtuJqX +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uBEqR2slkH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.op0jYFq8ug +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uBEqR2slkH +++ cat /tmp/tmp.op0jYFq8ug +++ rm /tmp/tmp.uBEqR2slkH /tmp/tmp.op0jYFq8ug +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZSnO2sditc +++ mktemp ++ local LAST_ERR=/tmp/tmp.9HL7zMpqqU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZSnO2sditc ++ cat /tmp/tmp.9HL7zMpqqU ++ rm /tmp/tmp.ZSnO2sditc /tmp/tmp.9HL7zMpqqU ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BEX0N3Cyp6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wVnc3LZcYv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BEX0N3Cyp6 +++ cat /tmp/tmp.wVnc3LZcYv +++ rm /tmp/tmp.BEX0N3Cyp6 /tmp/tmp.wVnc3LZcYv +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jLa1zezGkP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wkY48hA9vQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jLa1zezGkP +++ cat /tmp/tmp.wkY48hA9vQ +++ rm /tmp/tmp.jLa1zezGkP /tmp/tmp.wkY48hA9vQ +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GXIHodPCOt +++ mktemp ++ local LAST_ERR=/tmp/tmp.XLrsMgz4nF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GXIHodPCOt ++ cat /tmp/tmp.XLrsMgz4nF ++ rm 
/tmp/tmp.GXIHodPCOt /tmp/tmp.XLrsMgz4nF ++ return 0 ++ return + local URI=34.118.225.124,34.118.236.191,34.118.230.204 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.225.124,34.118.236.191,34.118.230.204 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.225.124,34.118.236.191,34.118.230.204 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rxSXp43XId +++ mktemp ++ local LAST_ERR=/tmp/tmp.YIgEcdWIUD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rxSXp43XId ++ cat /tmp/tmp.YIgEcdWIUD ++ rm /tmp/tmp.rxSXp43XId /tmp/tmp.YIgEcdWIUD ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ userAdmin:userAdmin123456@34.118.225.124,34.118.236.191,34.118.230.204 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.225.124,34.118.236.191,34.118.230.204:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cyKb8greM3 ++ mktemp + local LAST_ERR=/tmp/tmp.4s51Sswh5t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.225.124,34.118.236.191,34.118.230.204:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cyKb8greM3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.225.124:27017,34.118.236.191:27017,34.118.230.204:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8bb26f3c-f0cc-4018-a39b-c5a68e6c87b4") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.4s51Sswh5t + rm /tmp/tmp.cyKb8greM3 /tmp/tmp.4s51Sswh5t + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.225.124,34.118.236.191,34.118.230.204 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.225.124,34.118.236.191,34.118.230.204 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Nol1WrDnMt +++ mktemp ++ local LAST_ERR=/tmp/tmp.26rCn1QpWW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Nol1WrDnMt ++ cat /tmp/tmp.26rCn1QpWW ++ rm /tmp/tmp.Nol1WrDnMt /tmp/tmp.26rCn1QpWW ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.225.124,34.118.236.191,34.118.230.204 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.225.124,34.118.236.191,34.118.230.204:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FZNww8u047 ++ mktemp + local LAST_ERR=/tmp/tmp.vKkjWXaTFF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.225.124,34.118.236.191,34.118.230.204:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FZNww8u047 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.225.124:27017,34.118.236.191:27017,34.118.230.204:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e1e833f6-a301-4d6b-b20b-cc4e9937a2a8") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.vKkjWXaTFF + rm /tmp/tmp.FZNww8u047 /tmp/tmp.vKkjWXaTFF + return 0 + sleep 30 ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4r85anjft2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GYwxeaMz6E +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4r85anjft2 +++ cat /tmp/tmp.GYwxeaMz6E +++ rm /tmp/tmp.4r85anjft2 /tmp/tmp.GYwxeaMz6E +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3GxVcAMz8D ++++ mktemp +++ local LAST_ERR=/tmp/tmp.b547EvpVIQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3GxVcAMz8D +++ cat /tmp/tmp.b547EvpVIQ +++ rm /tmp/tmp.3GxVcAMz8D /tmp/tmp.b547EvpVIQ +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L02b6gGQif +++ mktemp ++ local LAST_ERR=/tmp/tmp.k2BvSC4ju0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get 
service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L02b6gGQif ++ cat /tmp/tmp.k2BvSC4ju0 ++ rm /tmp/tmp.L02b6gGQif /tmp/tmp.k2BvSC4ju0 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.225.124 '' :27017 + local command=find + local uri=myApp:myPass@34.118.225.124 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:00:53+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.225.124 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.225.124 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pYbGNE2PL5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5xinUKUBWd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pYbGNE2PL5 ++ cat /tmp/tmp.5xinUKUBWd ++ rm /tmp/tmp.pYbGNE2PL5 /tmp/tmp.5xinUKUBWd ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.225.124 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.225.124:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0tLmmjUk4r ++ mktemp + local LAST_ERR=/tmp/tmp.h2MPp4KCjN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.225.124:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0tLmmjUk4r + cat /tmp/tmp.h2MPp4KCjN + rm /tmp/tmp.0tLmmjUk4r /tmp/tmp.h2MPp4KCjN + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mZlprQ51P9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Q91FMO3U2U +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mZlprQ51P9 +++ cat /tmp/tmp.Q91FMO3U2U +++ rm /tmp/tmp.mZlprQ51P9 /tmp/tmp.Q91FMO3U2U +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.iBKVZxDaDz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Thz0OBxLHg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iBKVZxDaDz +++ cat /tmp/tmp.Thz0OBxLHg +++ rm /tmp/tmp.iBKVZxDaDz /tmp/tmp.Thz0OBxLHg +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QiHwGNLX54 +++ mktemp ++ local LAST_ERR=/tmp/tmp.S1Su0wEMdX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QiHwGNLX54 ++ cat /tmp/tmp.S1Su0wEMdX ++ rm /tmp/tmp.QiHwGNLX54 /tmp/tmp.S1Su0wEMdX ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.236.191 '' :27017 + local command=find + local uri=myApp:myPass@34.118.236.191 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:01:03+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.236.191 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.236.191 + local driver=mongodb + local suffix=:27017 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E692uXREPz +++ mktemp ++ local LAST_ERR=/tmp/tmp.KYaNDeOclC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E692uXREPz ++ cat /tmp/tmp.KYaNDeOclC ++ rm /tmp/tmp.E692uXREPz /tmp/tmp.KYaNDeOclC ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.236.191 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.236.191:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.mQKQYt3RwL ++ mktemp + local LAST_ERR=/tmp/tmp.Iyh8nNkWe7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.236.191:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mQKQYt3RwL + cat /tmp/tmp.Iyh8nNkWe7 + rm /tmp/tmp.mQKQYt3RwL /tmp/tmp.Iyh8nNkWe7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ 
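# The get_service_ip helper traced here resolves a per-pod service to a routable
# address. A minimal sketch of the branching it appears to implement,
# reconstructed from this xtrace output rather than from the suite's real source
# (a NodePort branch is compared against in the trace but its body is not
# exercised in this run, so it is omitted):
get_service_ip_sketch() {
    local service=$1
    local type
    type=$(kubectl get "service/${service}" -o 'jsonpath={.spec.type}')
    if [ "${type}" = "ClusterIP" ]; then
        kubectl get "service/${service}" -o 'jsonpath={.spec.clusterIP}'
    elif [ "${type}" = "LoadBalancer" ]; then
        # waits for .status.loadBalancer.ingress to be populated first;
        # see the sleep/egrep retries later in this log
        kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    fi
}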
kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1nps87QOzs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Y6tF0VJLRi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1nps87QOzs +++ cat /tmp/tmp.Y6tF0VJLRi +++ rm /tmp/tmp.1nps87QOzs /tmp/tmp.Y6tF0VJLRi +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7iuCxuWI0C ++++ mktemp +++ local LAST_ERR=/tmp/tmp.z99OoeCqY8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7iuCxuWI0C +++ cat /tmp/tmp.z99OoeCqY8 +++ rm /tmp/tmp.7iuCxuWI0C /tmp/tmp.z99OoeCqY8 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tMUM4thHig +++ mktemp ++ local LAST_ERR=/tmp/tmp.azKgkAQ2sF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tMUM4thHig ++ cat /tmp/tmp.azKgkAQ2sF ++ rm /tmp/tmp.tMUM4thHig /tmp/tmp.azKgkAQ2sF ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.230.204 '' :27017 + local command=find + local uri=myApp:myPass@34.118.230.204 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:01:14+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.230.204 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.230.204 + local driver=mongodb + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QfZolN7EWo +++ mktemp ++ local LAST_ERR=/tmp/tmp.wWzWMsNimR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QfZolN7EWo ++ cat /tmp/tmp.wWzWMsNimR ++ rm /tmp/tmp.QfZolN7EWo /tmp/tmp.wWzWMsNimR ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.230.204 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.230.204:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tasRv3dmHD ++ mktemp + local LAST_ERR=/tmp/tmp.xMCOan7jJr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.230.204:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tasRv3dmHD + cat /tmp/tmp.xMCOan7jJr + rm /tmp/tmp.tasRv3dmHD /tmp/tmp.xMCOan7jJr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find + [[ cluster-ip-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'delete PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.GnYmZ3AYd0 ++ mktemp + local LAST_ERR=/tmp/tmp.CF9pNXzZKt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GnYmZ3AYd0 perconaservermongodb.psmdb.percona.com "cluster-ip" deleted + cat /tmp/tmp.CF9pNXzZKt + rm /tmp/tmp.GnYmZ3AYd0 /tmp/tmp.CF9pNXzZKt + return 0 + desc 'check LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- check LoadBalancer ----------------------------------------------------------------------------------- + check_cr_config local-balancer-rs0 + local cluster=local-balancer-rs0 + desc 'create PSMDB cluster local-balancer-rs0' + set +o xtrace 
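# Condensed, the ClusterIP check that just finished is: write one document
# through the replica-set URI, then read db.test.find() back through each pod's
# own service IP and diff the normalized output against a golden file. A
# runnable sketch of that loop, using the same selector, filter, and sed
# normalization as the trace above (IPs and output paths are illustrative):
client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
for ip in 34.118.225.124 34.118.236.191 34.118.230.204; do
    kubectl exec "${client}" -- bash -c \
        "printf 'use myApp\n db.test.find()\n' | mongo 'mongodb://myApp:myPass@${ip}:27017/admin?ssl=false&replicaSet=rs0'" |
        egrep -v 'I NETWORK|W NETWORK|F NETWORK|connecting to:|Implicit session:|versions do not match' |
        /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//' >/tmp/find
    diff -u compare/find.json /tmp/find # any difference fails the check
done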
----------------------------------------------------------------------------------- create PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_OUT=/tmp/tmp.ZP3tEhAp6g + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + local LAST_ERR=/tmp/tmp.7Q0u8c8Cc0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZP3tEhAp6g perconaservermongodb.psmdb.percona.com/local-balancer created + cat /tmp/tmp.7Q0u8c8Cc0 + rm /tmp/tmp.ZP3tEhAp6g /tmp/tmp.7Q0u8c8Cc0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running local-balancer-rs0 3 false + local name=local-balancer-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=local-balancer ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod local-balancer-rs0-0 + local pod=local-balancer-rs0-0 + set +o xtrace waiting for pod/local-balancer-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod local-balancer-rs0-1 + local pod=local-balancer-rs0-1 + set +o xtrace waiting for pod/local-balancer-rs0-1 to be ready.....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5VWH8KxAh4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eJVgTUsInN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5VWH8KxAh4 ++ cat /tmp/tmp.eJVgTUsInN ++ rm /tmp/tmp.5VWH8KxAh4 /tmp/tmp.eJVgTUsInN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod local-balancer-rs0-2 + local pod=local-balancer-rs0-2 + set +o xtrace waiting for pod/local-balancer-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RhKEvsis1I +++ mktemp ++ local LAST_ERR=/tmp/tmp.26aeckBJGT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb local-balancer -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RhKEvsis1I ++ cat /tmp/tmp.26aeckBJGT ++ rm /tmp/tmp.RhKEvsis1I /tmp/tmp.26aeckBJGT ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/local-balancer-rs0 + local resource=statefulset/local-balancer-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml + local new_result=/tmp/tmp.TZvRRBkAdo/statefulset_local-balancer-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/local-balancer-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IMlF6i4OZs ++ mktemp + local LAST_ERR=/tmp/tmp.KVhInR3PVs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/local-balancer-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IMlF6i4OZs + cat /tmp/tmp.KVhInR3PVs + rm /tmp/tmp.IMlF6i4OZs /tmp/tmp.KVhInR3PVs + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/statefulset_local-balancer-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/statefulset_local-balancer-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/statefulset_local-balancer-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml /tmp/tmp.TZvRRBkAdo/statefulset_local-balancer-rs0.yml + compare_kubectl service/local-balancer-rs0-0 + local resource=service/local-balancer-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml + local new_result=/tmp/tmp.TZvRRBkAdo/service_local-balancer-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/local-balancer-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Yu6xt0qAi6 ++ mktemp + local LAST_ERR=/tmp/tmp.rL0OG5fQ8G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/local-balancer-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yu6xt0qAi6 + cat /tmp/tmp.rL0OG5fQ8G + rm /tmp/tmp.Yu6xt0qAi6 /tmp/tmp.rL0OG5fQ8G + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/service_local-balancer-rs0-0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/service_local-balancer-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/service_local-balancer-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml /tmp/tmp.TZvRRBkAdo/service_local-balancer-rs0-0.yml ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FuM58P0ftt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uAvCZGHlEk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FuM58P0ftt +++ cat /tmp/tmp.uAvCZGHlEk +++ rm /tmp/tmp.FuM58P0ftt /tmp/tmp.uAvCZGHlEk +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zwDr5h2gbC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2yTLyBikVo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zwDr5h2gbC +++ cat /tmp/tmp.2yTLyBikVo +++ rm /tmp/tmp.zwDr5h2gbC /tmp/tmp.2yTLyBikVo +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mwl1WmCR1m +++ mktemp ++ local LAST_ERR=/tmp/tmp.7i8j6M1sDa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.mwl1WmCR1m ++ cat /tmp/tmp.7i8j6M1sDa ++ rm /tmp/tmp.mwl1WmCR1m /tmp/tmp.7i8j6M1sDa ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ejDrUncBct +++ mktemp ++ local LAST_ERR=/tmp/tmp.yCxd0uqLm9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ejDrUncBct ++ cat /tmp/tmp.yCxd0uqLm9 ++ rm /tmp/tmp.ejDrUncBct /tmp/tmp.yCxd0uqLm9 ++ return 0 ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.c2Ifb2PeV6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GCRnRPsoS6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.c2Ifb2PeV6 +++ cat /tmp/tmp.GCRnRPsoS6 +++ rm /tmp/tmp.c2Ifb2PeV6 /tmp/tmp.GCRnRPsoS6 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vFqz85tsgA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.f8ZPQnRAmm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vFqz85tsgA +++ cat /tmp/tmp.f8ZPQnRAmm +++ rm /tmp/tmp.vFqz85tsgA /tmp/tmp.f8ZPQnRAmm +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ofX9mt9V0y +++ mktemp ++ local LAST_ERR=/tmp/tmp.w5txl3No3f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ofX9mt9V0y ++ cat /tmp/tmp.w5txl3No3f ++ rm /tmp/tmp.ofX9mt9V0y /tmp/tmp.w5txl3No3f ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DA1zzsU963 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ximPeXovcj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DA1zzsU963 ++ cat /tmp/tmp.ximPeXovcj ++ rm /tmp/tmp.DA1zzsU963 /tmp/tmp.ximPeXovcj ++ return 0 ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ 
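# The sleep/egrep retries visible just below are a poll loop: LoadBalancer
# addresses are assigned asynchronously by the cloud controller, so the test
# re-reads .status.loadBalancer.ingress every second until it is non-empty.
# Sketch of that loop (the helper name is illustrative):
wait_for_ingress() {
    local service=$1
    until kubectl get "service/${service}" \
        -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>/dev/null |
        egrep -q 'hostname|ip'; do
        sleep 1
    done
}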
local LAST_OUT=/tmp/tmp.N0edurNhgu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nSobYp5fn6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.N0edurNhgu +++ cat /tmp/tmp.nSobYp5fn6 +++ rm /tmp/tmp.N0edurNhgu /tmp/tmp.nSobYp5fn6 +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QQAwT41cVK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wF1ojbZU3u +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QQAwT41cVK +++ cat /tmp/tmp.wF1ojbZU3u +++ rm /tmp/tmp.QQAwT41cVK /tmp/tmp.wF1ojbZU3u +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ sleep 1 ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AB9746vYa4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.c14gAP8VeG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AB9746vYa4 ++ cat /tmp/tmp.c14gAP8VeG ++ rm /tmp/tmp.AB9746vYa4 /tmp/tmp.c14gAP8VeG ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xTU8WhVdxz +++ mktemp ++ local LAST_ERR=/tmp/tmp.cMgbcQRF2L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xTU8WhVdxz ++ cat /tmp/tmp.cMgbcQRF2L ++ rm /tmp/tmp.xTU8WhVdxz /tmp/tmp.cMgbcQRF2L ++ return 0 + local URI=34.42.39.206,34.27.72.106,34.68.87.49 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- 
create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.42.39.206,34.27.72.106,34.68.87.49 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.42.39.206,34.27.72.106,34.68.87.49 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QAUXPdPOl5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ftf45Cxxad ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QAUXPdPOl5 ++ cat /tmp/tmp.ftf45Cxxad ++ rm /tmp/tmp.QAUXPdPOl5 /tmp/tmp.ftf45Cxxad ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ userAdmin:userAdmin123456@34.42.39.206,34.27.72.106,34.68.87.49 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.42.39.206,34.27.72.106,34.68.87.49:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Ej7MGui7HH ++ mktemp + local LAST_ERR=/tmp/tmp.edHbQaVHRV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.42.39.206,34.27.72.106,34.68.87.49:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ej7MGui7HH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.42.39.206:27017,34.27.72.106:27017,34.68.87.49:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("76b6271f-6ed2-413e-93c2-9b1e27d8e05b") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.edHbQaVHRV + rm /tmp/tmp.Ej7MGui7HH /tmp/tmp.edHbQaVHRV + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.42.39.206,34.27.72.106,34.68.87.49 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.42.39.206,34.27.72.106,34.68.87.49 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.46MKGuyjdo +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nc1n0c9PNa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set 
-e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.46MKGuyjdo ++ cat /tmp/tmp.Nc1n0c9PNa ++ rm /tmp/tmp.46MKGuyjdo /tmp/tmp.Nc1n0c9PNa ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.42.39.206,34.27.72.106,34.68.87.49 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.42.39.206,34.27.72.106,34.68.87.49:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bPDc8T83uA ++ mktemp + local LAST_ERR=/tmp/tmp.mttASYPE9i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.42.39.206,34.27.72.106,34.68.87.49:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bPDc8T83uA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.42.39.206:27017,34.27.72.106:27017,34.68.87.49:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ecc23e65-3e9c-41fa-8db3-5cd18a20a512") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.mttASYPE9i + rm /tmp/tmp.bPDc8T83uA /tmp/tmp.mttASYPE9i + return 0 + sleep 30 ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8uDzLg7aG8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.12WY3STQHG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8uDzLg7aG8 +++ cat /tmp/tmp.12WY3STQHG +++ rm /tmp/tmp.8uDzLg7aG8 /tmp/tmp.12WY3STQHG +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6QmyZIGb8u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sml0EneV1u +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6QmyZIGb8u +++ cat /tmp/tmp.sml0EneV1u +++ rm /tmp/tmp.6QmyZIGb8u /tmp/tmp.sml0EneV1u +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vohSx7yazc +++ mktemp ++ local LAST_ERR=/tmp/tmp.w0PSydl4DL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.vohSx7yazc ++ cat /tmp/tmp.w0PSydl4DL ++ rm /tmp/tmp.vohSx7yazc /tmp/tmp.w0PSydl4DL ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GpsBGVt7Px +++ mktemp ++ local LAST_ERR=/tmp/tmp.l2WgVyClUD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GpsBGVt7Px ++ cat /tmp/tmp.l2WgVyClUD ++ rm /tmp/tmp.GpsBGVt7Px /tmp/tmp.l2WgVyClUD ++ return 0 + compare_mongo_cmd find myApp:myPass@34.42.39.206 '' :27017 + local command=find + local uri=myApp:myPass@34.42.39.206 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:05:02+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.42.39.206 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.42.39.206 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2q0wc7SOr9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rbmaRuurmC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2q0wc7SOr9 ++ cat /tmp/tmp.rbmaRuurmC ++ rm /tmp/tmp.2q0wc7SOr9 /tmp/tmp.rbmaRuurmC ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.42.39.206 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.42.39.206:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Q8EtpPluyw ++ mktemp + local LAST_ERR=/tmp/tmp.WizwIeKmLi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.42.39.206:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q8EtpPluyw + cat /tmp/tmp.WizwIeKmLi + rm /tmp/tmp.Q8EtpPluyw /tmp/tmp.WizwIeKmLi + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Gd6MPCR7r3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jk4DfFnDHT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 
'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Gd6MPCR7r3 +++ cat /tmp/tmp.jk4DfFnDHT +++ rm /tmp/tmp.Gd6MPCR7r3 /tmp/tmp.jk4DfFnDHT +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Tc12WteOIo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wjKcj1O6OX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Tc12WteOIo +++ cat /tmp/tmp.wjKcj1O6OX +++ rm /tmp/tmp.Tc12WteOIo /tmp/tmp.wjKcj1O6OX +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8zmmtk5uiP +++ mktemp ++ local LAST_ERR=/tmp/tmp.4k02mh33ro ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8zmmtk5uiP ++ cat /tmp/tmp.4k02mh33ro ++ rm /tmp/tmp.8zmmtk5uiP /tmp/tmp.4k02mh33ro ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZEsrsMUy7O +++ mktemp ++ local LAST_ERR=/tmp/tmp.oploaOu8x4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZEsrsMUy7O ++ cat /tmp/tmp.oploaOu8x4 ++ rm /tmp/tmp.ZEsrsMUy7O /tmp/tmp.oploaOu8x4 ++ return 0 + compare_mongo_cmd find myApp:myPass@34.27.72.106 '' :27017 + local command=find + local uri=myApp:myPass@34.27.72.106 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:05:09+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.27.72.106 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.27.72.106 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.vgwEiDvTZq +++ mktemp ++ local LAST_ERR=/tmp/tmp.llIimspQpD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vgwEiDvTZq ++ cat /tmp/tmp.llIimspQpD ++ rm /tmp/tmp.vgwEiDvTZq /tmp/tmp.llIimspQpD ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.27.72.106 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.27.72.106:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bO8QhehzoC ++ mktemp + local LAST_ERR=/tmp/tmp.lTY7bh655B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.27.72.106:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bO8QhehzoC + cat /tmp/tmp.lTY7bh655B + rm /tmp/tmp.bO8QhehzoC /tmp/tmp.lTY7bh655B + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.u0yHa3YjQE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3i3Gy5ZAic +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.u0yHa3YjQE +++ cat /tmp/tmp.3i3Gy5ZAic +++ rm /tmp/tmp.u0yHa3YjQE /tmp/tmp.3i3Gy5ZAic +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fJnkEpruBw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oUEG57jjXW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fJnkEpruBw +++ cat /tmp/tmp.oUEG57jjXW +++ rm /tmp/tmp.fJnkEpruBw /tmp/tmp.oUEG57jjXW +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer 
= NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ egrep -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LzYsSG3kN6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.URHfPviYPD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LzYsSG3kN6 ++ cat /tmp/tmp.URHfPviYPD ++ rm /tmp/tmp.LzYsSG3kN6 /tmp/tmp.URHfPviYPD ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ZlttYV4mY +++ mktemp ++ local LAST_ERR=/tmp/tmp.YJ4CczFilj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ZlttYV4mY ++ cat /tmp/tmp.YJ4CczFilj ++ rm /tmp/tmp.8ZlttYV4mY /tmp/tmp.YJ4CczFilj ++ return 0 + compare_mongo_cmd find myApp:myPass@34.68.87.49 '' :27017 + local command=find + local uri=myApp:myPass@34.68.87.49 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:05:17+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.68.87.49 mongodb :27017 + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@34.68.87.49 + local driver=mongodb + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HPOxYYVOKM +++ mktemp ++ local LAST_ERR=/tmp/tmp.M6mViRwYmh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HPOxYYVOKM ++ cat /tmp/tmp.M6mViRwYmh ++ rm /tmp/tmp.HPOxYYVOKM /tmp/tmp.M6mViRwYmh ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.68.87.49 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.68.87.49:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TUN70k0dqD ++ mktemp + local LAST_ERR=/tmp/tmp.UwwhUKbajs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.68.87.49:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TUN70k0dqD + cat /tmp/tmp.UwwhUKbajs + rm /tmp/tmp.TUN70k0dqD /tmp/tmp.UwwhUKbajs + return 0 + [[ 0 -eq 0 ]] + 
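# Every kubectl invocation in this log runs through the suite's kubectl_bin
# retry wrapper: capture stdout/stderr into mktemp files, attempt the command up
# to three times, replay the captured output, clean up. A minimal sketch
# reconstructed from the trace; the back-off between failed attempts is an
# assumption, since every call in this run succeeds on the first try:
kubectl_bin_sketch() {
    local out err status=1
    out=$(mktemp) err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"${out}" 2>"${err}"
        status=$?
        set -e
        [ "${status}" -eq 0 ] && break
        sleep "${i}"
    done
    cat "${out}"
    cat "${err}" >&2
    rm "${out}" "${err}"
    return "${status}"
}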
diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find + [[ local-balancer-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'delete PSMDB cluster local-balancer-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Y9SpcyPpvw ++ mktemp + local LAST_ERR=/tmp/tmp.tQfhpZqSVS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y9SpcyPpvw perconaservermongodb.psmdb.percona.com "local-balancer" deleted + cat /tmp/tmp.tQfhpZqSVS + rm /tmp/tmp.Y9SpcyPpvw /tmp/tmp.tQfhpZqSVS + return 0 + desc 'check NodePort' + set +o xtrace ----------------------------------------------------------------------------------- check NodePort ----------------------------------------------------------------------------------- + check_cr_config node-port-rs0 + local cluster=node-port-rs0 + desc 'create PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/node-port-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/node-port-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/node-port-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c"' + local LAST_OUT=/tmp/tmp.XKw8P6211M + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.irLIeG7d2k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XKw8P6211M perconaservermongodb.psmdb.percona.com/node-port created + cat /tmp/tmp.irLIeG7d2k + rm /tmp/tmp.XKw8P6211M /tmp/tmp.irLIeG7d2k + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running node-port-rs0 3 false + local name=node-port-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=node-port ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod node-port-rs0-0 + local pod=node-port-rs0-0 + set +o 
xtrace waiting for pod/node-port-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod node-port-rs0-1 + local pod=node-port-rs0-1 + set +o xtrace waiting for pod/node-port-rs0-1 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X9NrcHhpCD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ix2A1VqHSY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X9NrcHhpCD ++ cat /tmp/tmp.Ix2A1VqHSY ++ rm /tmp/tmp.X9NrcHhpCD /tmp/tmp.Ix2A1VqHSY ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod node-port-rs0-2 + local pod=node-port-rs0-2 + set +o xtrace waiting for pod/node-port-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P0hvkNJq8K +++ mktemp ++ local LAST_ERR=/tmp/tmp.6v9ejj9ojH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P0hvkNJq8K ++ cat /tmp/tmp.6v9ejj9ojH ++ rm /tmp/tmp.P0hvkNJq8K /tmp/tmp.6v9ejj9ojH ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/node-port-rs0 + local resource=statefulset/node-port-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml + local new_result=/tmp/tmp.TZvRRBkAdo/statefulset_node-port-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/node-port-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ANiYK3dusg ++ mktemp + local LAST_ERR=/tmp/tmp.CZBKos5x6O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/node-port-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ANiYK3dusg + cat /tmp/tmp.CZBKos5x6O + rm /tmp/tmp.ANiYK3dusg /tmp/tmp.CZBKos5x6O + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/statefulset_node-port-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/statefulset_node-port-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/statefulset_node-port-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml /tmp/tmp.TZvRRBkAdo/statefulset_node-port-rs0.yml + compare_kubectl service/node-port-rs0-0 + local resource=service/node-port-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml + local new_result=/tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 ++ mktemp + local LAST_OUT=/tmp/tmp.RqbmFYLs1l ++ mktemp + local LAST_ERR=/tmp/tmp.XUn6DFHbRZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/node-port-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RqbmFYLs1l + cat /tmp/tmp.XUn6DFHbRZ + rm /tmp/tmp.RqbmFYLs1l /tmp/tmp.XUn6DFHbRZ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5xTwNG9v5V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qsEpeqhSBn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5xTwNG9v5V +++ cat /tmp/tmp.qsEpeqhSBn +++ rm /tmp/tmp.5xTwNG9v5V /tmp/tmp.qsEpeqhSBn +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sFmr0DGuI1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pSYkAHlJsW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sFmr0DGuI1 +++ cat /tmp/tmp.pSYkAHlJsW +++ rm /tmp/tmp.sFmr0DGuI1 /tmp/tmp.pSYkAHlJsW +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CN6jOG9mUs +++ mktemp ++ local LAST_ERR=/tmp/tmp.RlF2CKpUVK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CN6jOG9mUs ++ cat /tmp/tmp.RlF2CKpUVK ++ rm /tmp/tmp.CN6jOG9mUs /tmp/tmp.RlF2CKpUVK ++ return 0 ++ return ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5eDr9iPr3e ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uWRbQSAgiT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5eDr9iPr3e +++ cat /tmp/tmp.uWRbQSAgiT +++ rm /tmp/tmp.5eDr9iPr3e /tmp/tmp.uWRbQSAgiT +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-1 -o 
'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RPEdajgaQg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qvjFsXTB1Z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RPEdajgaQg +++ cat /tmp/tmp.qvjFsXTB1Z +++ rm /tmp/tmp.RPEdajgaQg /tmp/tmp.qvjFsXTB1Z +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vizBK3i9jX +++ mktemp ++ local LAST_ERR=/tmp/tmp.QS5FLHWf9i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vizBK3i9jX ++ cat /tmp/tmp.QS5FLHWf9i ++ rm /tmp/tmp.vizBK3i9jX /tmp/tmp.QS5FLHWf9i ++ return 0 ++ return ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AtsZVFSKGa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CoQQ0I7TXF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AtsZVFSKGa +++ cat /tmp/tmp.CoQQ0I7TXF +++ rm /tmp/tmp.AtsZVFSKGa /tmp/tmp.CoQQ0I7TXF +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iAkNYAJ5Hi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Z1JZBYXa3k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iAkNYAJ5Hi +++ cat /tmp/tmp.Z1JZBYXa3k +++ rm /tmp/tmp.iAkNYAJ5Hi /tmp/tmp.Z1JZBYXa3k +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yDBPyuASlI +++ mktemp ++ local LAST_ERR=/tmp/tmp.FKobJCHXaB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yDBPyuASlI ++ cat /tmp/tmp.FKobJCHXaB ++ rm /tmp/tmp.yDBPyuASlI /tmp/tmp.FKobJCHXaB ++ return 0 ++ return + local URI=34.118.239.236,34.118.230.187,34.118.237.10 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.239.236,34.118.230.187,34.118.237.10 mongodb :27017 + local 
'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.239.236,34.118.230.187,34.118.237.10 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eqqX22jPyH +++ mktemp ++ local LAST_ERR=/tmp/tmp.6YfpVFLuuW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eqqX22jPyH ++ cat /tmp/tmp.6YfpVFLuuW ++ rm /tmp/tmp.eqqX22jPyH /tmp/tmp.6YfpVFLuuW ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ userAdmin:userAdmin123456@34.118.239.236,34.118.230.187,34.118.237.10 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.239.236,34.118.230.187,34.118.237.10:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.rGVQWS2GRI ++ mktemp + local LAST_ERR=/tmp/tmp.jIyA03PN4l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.239.236,34.118.230.187,34.118.237.10:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rGVQWS2GRI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.239.236:27017,34.118.230.187:27017,34.118.237.10:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9c695389-2f49-47d2-b490-a408167e2db2") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.jIyA03PN4l + rm /tmp/tmp.rGVQWS2GRI /tmp/tmp.jIyA03PN4l + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.239.236,34.118.230.187,34.118.237.10 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.239.236,34.118.230.187,34.118.237.10 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vO69Bh5PvE +++ mktemp ++ local LAST_ERR=/tmp/tmp.bNYESPFy4O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vO69Bh5PvE ++ cat /tmp/tmp.bNYESPFy4O ++ rm /tmp/tmp.vO69Bh5PvE /tmp/tmp.bNYESPFy4O ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ 
myApp:myPass@34.118.239.236,34.118.230.187,34.118.237.10 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.239.236,34.118.230.187,34.118.237.10:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8LHzb7f5vy ++ mktemp + local LAST_ERR=/tmp/tmp.WPDqoPNZj2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.239.236,34.118.230.187,34.118.237.10:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8LHzb7f5vy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.239.236:27017,34.118.230.187:27017,34.118.237.10:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("808a1850-786c-45de-aba4-50390a675fc0") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.WPDqoPNZj2 + rm /tmp/tmp.8LHzb7f5vy /tmp/tmp.WPDqoPNZj2 + return 0 + sleep 30 ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3PyHkSqmHh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fY3BHKR7O5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3PyHkSqmHh +++ cat /tmp/tmp.fY3BHKR7O5 +++ rm /tmp/tmp.3PyHkSqmHh /tmp/tmp.fY3BHKR7O5 +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bAadGy0iwe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZEO1ZXk8gA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bAadGy0iwe +++ cat /tmp/tmp.ZEO1ZXk8gA +++ rm /tmp/tmp.bAadGy0iwe /tmp/tmp.ZEO1ZXk8gA +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1h38zZumxo +++ mktemp ++ local LAST_ERR=/tmp/tmp.9RgBx4KEe6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1h38zZumxo ++ cat /tmp/tmp.9RgBx4KEe6 ++ rm /tmp/tmp.1h38zZumxo /tmp/tmp.9RgBx4KEe6 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.239.236 '' :27017 + local command=find + local uri=myApp:myPass@34.118.239.236 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:08:23+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.239.236 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.239.236 + local driver=mongodb + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.IvaPW1htt2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JG8Kkmx6g9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IvaPW1htt2 ++ cat /tmp/tmp.JG8Kkmx6g9 ++ rm /tmp/tmp.IvaPW1htt2 /tmp/tmp.JG8Kkmx6g9 ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.239.236 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.239.236:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RDv3ogjwYB ++ mktemp + local LAST_ERR=/tmp/tmp.768kFTTHEL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.239.236:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RDv3ogjwYB + cat /tmp/tmp.768kFTTHEL + rm /tmp/tmp.RDv3ogjwYB /tmp/tmp.768kFTTHEL + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.B3dPQ7YeGO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IJUdJkclZs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.B3dPQ7YeGO +++ cat /tmp/tmp.IJUdJkclZs +++ rm /tmp/tmp.B3dPQ7YeGO /tmp/tmp.IJUdJkclZs +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.swPdXY64Y8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1ReyyuGn44 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.swPdXY64Y8 +++ cat /tmp/tmp.1ReyyuGn44 +++ rm /tmp/tmp.swPdXY64Y8 /tmp/tmp.1ReyyuGn44 +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get 
service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fu2cKPTc2y +++ mktemp ++ local LAST_ERR=/tmp/tmp.FY4p7ayndj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fu2cKPTc2y ++ cat /tmp/tmp.FY4p7ayndj ++ rm /tmp/tmp.fu2cKPTc2y /tmp/tmp.FY4p7ayndj ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.230.187 '' :27017 + local command=find + local uri=myApp:myPass@34.118.230.187 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:08:32+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.230.187 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.230.187 + local driver=mongodb + local suffix=:27017 + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.ALKYfZ3Sqj +++ mktemp ++ local LAST_ERR=/tmp/tmp.DZYQrLQjzz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ALKYfZ3Sqj ++ cat /tmp/tmp.DZYQrLQjzz ++ rm /tmp/tmp.ALKYfZ3Sqj /tmp/tmp.DZYQrLQjzz ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.230.187 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.230.187:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6vyuNQD3eF ++ mktemp + local LAST_ERR=/tmp/tmp.Ca0EwtiZkr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.230.187:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6vyuNQD3eF + cat /tmp/tmp.Ca0EwtiZkr + rm /tmp/tmp.6vyuNQD3eF /tmp/tmp.Ca0EwtiZkr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QU2399osWL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.baocpKUeTg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QU2399osWL +++ cat /tmp/tmp.baocpKUeTg +++ rm /tmp/tmp.QU2399osWL 
/tmp/tmp.baocpKUeTg +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WXaXb4EVDS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ILuJt47tJC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WXaXb4EVDS +++ cat /tmp/tmp.ILuJt47tJC +++ rm /tmp/tmp.WXaXb4EVDS /tmp/tmp.ILuJt47tJC +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C7uCqlI6c0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ANBXflkaX6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C7uCqlI6c0 ++ cat /tmp/tmp.ANBXflkaX6 ++ rm /tmp/tmp.C7uCqlI6c0 /tmp/tmp.ANBXflkaX6 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.237.10 '' :27017 + local command=find + local uri=myApp:myPass@34.118.237.10 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-19T16:08:41+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.237.10 mongodb :27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.237.10 + local driver=mongodb + local suffix=:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H6f8qiyXbM +++ mktemp ++ local LAST_ERR=/tmp/tmp.blBosvcjE7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H6f8qiyXbM ++ cat /tmp/tmp.blBosvcjE7 ++ rm /tmp/tmp.H6f8qiyXbM /tmp/tmp.blBosvcjE7 ++ return 0 + local client_container=psmdb-client-66f577db5f-hp6kq + local mongo_flag= + [[ myApp:myPass@34.118.237.10 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.237.10:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.h0T3B2JItA ++ mktemp + local LAST_ERR=/tmp/tmp.HUioqLKCJy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hp6kq -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.237.10:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h0T3B2JItA + cat /tmp/tmp.HUioqLKCJy + rm /tmp/tmp.h0T3B2JItA /tmp/tmp.HUioqLKCJy + 
return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.TZvRRBkAdo/find + [[ node-port-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'add service-per-pod label and annotation' + set +o xtrace ----------------------------------------------------------------------------------- add service-per-pod label and annotation ----------------------------------------------------------------------------------- ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EgYlpj5ulz +++ mktemp ++ local LAST_ERR=/tmp/tmp.zTh1Go61UV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EgYlpj5ulz ++ cat /tmp/tmp.zTh1Go61UV ++ rm /tmp/tmp.EgYlpj5ulz /tmp/tmp.zTh1Go61UV ++ return 0 + old_node_port=30879 + kubectl_bin patch psmdb node-port --type=json --patch '[ { "op": "add", "path": "/spec/replsets/0/expose/annotations", "value": { "test": "service-per-pod", } }, { "op": "add", "path": "/spec/replsets/0/expose/labels", "value": { "test": "service-per-pod", } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.xtc4cEasdX ++ mktemp + local LAST_ERR=/tmp/tmp.wshvdunWOr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb node-port --type=json --patch '[ { "op": "add", "path": "/spec/replsets/0/expose/annotations", "value": { "test": "service-per-pod", } }, { "op": "add", "path": "/spec/replsets/0/expose/labels", "value": { "test": "service-per-pod", } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xtc4cEasdX perconaservermongodb.psmdb.percona.com/node-port patched + cat /tmp/tmp.wshvdunWOr + rm /tmp/tmp.xtc4cEasdX /tmp/tmp.wshvdunWOr + return 0 + sleep 5 + desc 'check if service created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/node-port-rs0-0 -updated + local resource=service/node-port-rs0-0 + local postfix=-updated + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml + local new_result=/tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. 
| select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-6273", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | ++ mktemp (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.ykKnETdpS8 ++ mktemp + local LAST_ERR=/tmp/tmp.5jEgAcr7tk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ykKnETdpS8 + cat /tmp/tmp.5jEgAcr7tk + rm /tmp/tmp.ykKnETdpS8 /tmp/tmp.5jEgAcr7tk + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml /tmp/tmp.TZvRRBkAdo/service_node-port-rs0-0.yml ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BrK4gnXq9A +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jmn0VBJlHF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BrK4gnXq9A ++ cat /tmp/tmp.Jmn0VBJlHF ++ rm /tmp/tmp.BrK4gnXq9A /tmp/tmp.Jmn0VBJlHF ++ return 0 + current_node_port=30879 + [[ 30879 != \3\0\8\7\9 ]] + desc 'delete PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/node-port-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.7X9H2lPGCd ++ mktemp + local LAST_ERR=/tmp/tmp.FEkSIb11a9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/node-port-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7X9H2lPGCd perconaservermongodb.psmdb.percona.com "node-port" deleted + cat /tmp/tmp.FEkSIb11a9 + rm /tmp/tmp.7X9H2lPGCd /tmp/tmp.FEkSIb11a9 + return 0 + desc 'check Mongos in sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- check Mongos in sharded cluster ----------------------------------------------------------------------------------- + local cluster=some-name + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/e2e-tests/service-per-pod/conf/sharded.yml + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1897-ea494d1c"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + local LAST_OUT=/tmp/tmp.HHsqBS3n3d + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + local LAST_ERR=/tmp/tmp.jQtc6ZaWp9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HHsqBS3n3d perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.jQtc6ZaWp9 + rm /tmp/tmp.HHsqBS3n3d /tmp/tmp.jQtc6ZaWp9 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.........OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AiUiAjfTfc +++ mktemp ++ local LAST_ERR=/tmp/tmp.8Cueqpn9oz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AiUiAjfTfc ++ cat /tmp/tmp.8Cueqpn9oz ++ rm /tmp/tmp.AiUiAjfTfc /tmp/tmp.8Cueqpn9oz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yd3JtpC6by +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mo3ZDyc4G5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yd3JtpC6by ++ cat /tmp/tmp.Mo3ZDyc4G5 ++ rm /tmp/tmp.yd3JtpC6by /tmp/tmp.Mo3ZDyc4G5 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................. + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4zgRV3gaM5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5y3gfgL4Qy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4zgRV3gaM5 ++ cat /tmp/tmp.5y3gfgL4Qy ++ rm /tmp/tmp.4zgRV3gaM5 /tmp/tmp.5y3gfgL4Qy ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qc5FymgTp4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.v4yunEwcOZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qc5FymgTp4 ++ cat /tmp/tmp.v4yunEwcOZ ++ rm /tmp/tmp.qc5FymgTp4 /tmp/tmp.v4yunEwcOZ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ml3TW5at6c +++ mktemp ++ local LAST_ERR=/tmp/tmp.HJy65TtMol ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ml3TW5at6c ++ cat /tmp/tmp.HJy65TtMol ++ rm /tmp/tmp.Ml3TW5at6c /tmp/tmp.HJy65TtMol ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NmPsLq5azg +++ mktemp ++ local LAST_ERR=/tmp/tmp.4ebWOwsCW3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NmPsLq5azg ++ cat /tmp/tmp.4ebWOwsCW3 ++ rm /tmp/tmp.NmPsLq5azg /tmp/tmp.4ebWOwsCW3 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'enabling servicePerPod for mongos' + set +o xtrace ----------------------------------------------------------------------------------- enabling servicePerPod for mongos ----------------------------------------------------------------------------------- + kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oECtM1o8Xh +++ mktemp ++ local LAST_ERR=/tmp/tmp.OgUzzXXOhj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oECtM1o8Xh ++ cat /tmp/tmp.OgUzzXXOhj ++ rm /tmp/tmp.oECtM1o8Xh /tmp/tmp.OgUzzXXOhj ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J0anReQ8xH +++ mktemp ++ local LAST_ERR=/tmp/tmp.TPEOUxaX3g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J0anReQ8xH ++ cat /tmp/tmp.TPEOUxaX3g ++ rm /tmp/tmp.J0anReQ8xH /tmp/tmp.TPEOUxaX3g ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + check_service present some-name-mongos-0 + 
state=present + svc_name=some-name-mongos-0 + '[' present = present ']' + echo -n 'check that some-name-mongos-0 was created' check that some-name-mongos-0 was created+ local timeout=0 + grep -vq NotFound + kubectl_bin get service/some-name-mongos-0 -o 'jsonpath={.spec.type}' + echo .OK .OK + check_service present some-name-mongos-1 + state=present + svc_name=some-name-mongos-1 + '[' present = present ']' + echo -n 'check that some-name-mongos-1 was created' check that some-name-mongos-1 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-1 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service present some-name-mongos-2 + state=present + svc_name=some-name-mongos-2 + '[' present = present ']' + echo -n 'check that some-name-mongos-2 was created' check that some-name-mongos-2 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-2 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service removed some-name-mongos + state=removed + svc_name=some-name-mongos + '[' removed = present ']' + '[' removed = removed ']' + echo -n 'check that some-name-mongos was removed' check that some-name-mongos was removed++ kubectl_bin get service/some-name-mongos -o 'jsonpath={.spec.type}' ++ grep NotFound + [[ -z Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found ]] + echo .OK .OK + destroy service-per-pod-6273 + local namespace=service-per-pod-6273 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.FoHuZC3TcX ++ mktemp + local LAST_ERR=/tmp/tmp.lrxaAvoibC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FoHuZC3TcX customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.lrxaAvoibC + rm /tmp/tmp.FoHuZC3TcX /tmp/tmp.lrxaAvoibC + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch 
perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.egVLf72yg6 ++ mktemp + local LAST_ERR=/tmp/tmp.BR9bRNsAms + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.egVLf72yg6 + cat /tmp/tmp.BR9bRNsAms + rm /tmp/tmp.egVLf72yg6 /tmp/tmp.BR9bRNsAms + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.czJPC2KzCZ ++ mktemp + local LAST_ERR=/tmp/tmp.1RssLAOfkY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.czJPC2KzCZ + cat /tmp/tmp.1RssLAOfkY + rm /tmp/tmp.czJPC2KzCZ /tmp/tmp.1RssLAOfkY + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xhlCiIfdZw ++ mktemp + local LAST_ERR=/tmp/tmp.HV5GnD9tZO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xhlCiIfdZw + cat /tmp/tmp.HV5GnD9tZO + rm /tmp/tmp.xhlCiIfdZw /tmp/tmp.HV5GnD9tZO + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.enmOAaCkBH ++ mktemp + local LAST_ERR=/tmp/tmp.72YlprwUki + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f 
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.enmOAaCkBH
++ mktemp
+ local LAST_ERR=/tmp/tmp.72YlprwUki
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1897/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.enmOAaCkBH
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.72YlprwUki
+ rm /tmp/tmp.enmOAaCkBH /tmp/tmp.72YlprwUki
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.LzsBtkluMO
++ mktemp
+ local LAST_ERR=/tmp/tmp.OrxTjDMUwI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.LzsBtkluMO
namespace "cert-manager" deleted
customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted
serviceaccount "cert-manager-cainjector" deleted
serviceaccount "cert-manager" deleted
serviceaccount "cert-manager-webhook" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted
role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted
role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted
rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted
rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted
mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted
validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted
+ cat /tmp/tmp.OrxTjDMUwI
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
+ sleep 0
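Attempt 1 above removed most of the cert-manager objects but still exited non-zero, because a handful of manifest entries (the tokenrequest role, the dynamic-serving role and rolebindings, the services and deployments) were already absent. Each retry then fails on everything the previous attempt deleted, which produces all of the NotFound noise that follows. Unlike the cw-rbac deletion above, this kubectl delete -f runs without --ignore-not-found; with that flag (a standard kubectl delete option), already-missing objects count as success and the teardown becomes idempotent. A one-line sketch, using the same manifest URL as the trace:

    # Idempotent teardown: NotFound is treated as success, so retries stay
    # quiet and the exit status reflects only real failures.
    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml --ignore-not-found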
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.LzsBtkluMO
namespace "cert-manager" deleted
+ cat /tmp/tmp.OrxTjDMUwI
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.LzsBtkluMO
+ cat /tmp/tmp.OrxTjDMUwI
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.LzsBtkluMO /tmp/tmp.OrxTjDMUwI + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.TZvRRBkAdo + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + kubectl_bin delete --grace-period=0 --force=true namespace service-per-pod-6273 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.n01uWwDUsC ++ mktemp + local LAST_ERR=/tmp/tmp.Ln9uHH03Uc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace service-per-pod-6273 + local LAST_OUT=/tmp/tmp.7kei8S9x9P + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_ERR=/tmp/tmp.njM1C6mhWS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator