Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/logs/cross-site-sharded.log
WARNING: version difference between client (1.32) and server (1.29) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.32) and server (1.29) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.32) and server (1.29) exceeds the supported minor version skew of +/-1
++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod8.0
++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0
+++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version'
+++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0
+++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
+++ local 'cli=mongod --version'
+++ local pod_name=20652
+++ kubectl_bin -n default run 20652 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.oJIz816DQv
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.YM1IpZheVD
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default run 20652 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.oJIz816DQv
+++ cat /tmp/tmp.YM1IpZheVD
+++ rm /tmp/tmp.oJIz816DQv /tmp/tmp.YM1IpZheVD
+++ return 0
+++ kubectl_bin -n default wait --for=condition=Ready pod/20652
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.E0TW8scGrA
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.PcRpnpIuUc
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default wait --for=condition=Ready pod/20652
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.E0TW8scGrA
+++ cat /tmp/tmp.PcRpnpIuUc
+++ rm /tmp/tmp.E0TW8scGrA /tmp/tmp.PcRpnpIuUc
+++ return 0
++++ kubectl_bin -n default exec 20652 -- bash -c 'mongod --version 2>&1'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.eVuAGDETOV
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.K0brJIN7NA
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in '$(seq 0 2)'
++++ set +e
++++ kubectl -n default exec 20652 -- bash -c 'mongod --version 2>&1'
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.eVuAGDETOV
++++ cat /tmp/tmp.K0brJIN7NA
++++ rm /tmp/tmp.eVuAGDETOV /tmp/tmp.K0brJIN7NA
++++ return 0
+++ local 'output=db version v8.0.4-2
Build Info: {
    "version": "8.0.4-2",
    "gitVersion": "4050406b7eb2895c2810029526e5439b4ebc12a5",
    "openSSLVersion": "OpenSSL 3.2.2 4 Jun 2024",
    "modules": [],
    "proFeatures": [],
    "allocator": "tcmalloc-google",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}'
+++ kubectl_bin -n default delete pod/20652 --grace-period=0 --force
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.FHVvbqkRdC
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.NwbTnwPS4d
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default delete pod/20652 --grace-period=0 --force
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.FHVvbqkRdC
+++ cat /tmp/tmp.NwbTnwPS4d
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+++ rm /tmp/tmp.FHVvbqkRdC /tmp/tmp.NwbTnwPS4d
+++ return 0
+++ echo db version v8.0.4-2 Build Info: '{' '"version":' '"8.0.4-2",' '"gitVersion":' '"4050406b7eb2895c2810029526e5439b4ebc12a5",' '"openSSLVersion":' '"OpenSSL' 3.2.2 4 Jun '2024",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}'
++ version_info=8.0.4-2
++ [[ ! 8.0.4-2 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]]
++ echo 8.0.4-2
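The trace above is the expansion of the suite's get_mongod_ver_from_image helper: it starts a throwaway pod from the mongod image, waits for it to become Ready, execs `mongod --version` inside it, force-deletes the pod (triggering the "Immediate deletion" warning seen above), and extracts the version with sed. A minimal sketch of that pattern, reconstructed from the trace; plain kubectl stands in for the retrying kubectl_bin wrapper, and $RANDOM as the pod-name source is an assumption (the trace only shows the value 20652):

run_simple_cli_inside_image() {
	local image=$1
	local cli=$2
	local pod_name=$RANDOM    # assumption; the trace shows 20652

	kubectl -n default run "$pod_name" --image="$image" --restart=Never --command -- sleep infinity
	kubectl -n default wait --for=condition=Ready "pod/$pod_name"
	local output
	output=$(kubectl -n default exec "$pod_name" -- bash -c "$cli 2>&1")
	kubectl -n default delete "pod/$pod_name" --grace-period=0 --force
	echo "$output"
}

get_mongod_ver_from_image() {
	local image=$1
	# "db version v8.0.4-2 ..." -> "8.0.4-2"
	run_simple_cli_inside_image "$image" 'mongod --version' \
		| /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
}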
+ FULL_VER=8.0.4-2
+ MONGO_VER=8.0
+ unset OPERATOR_NS
+ main_cluster=cross-site-sharded-main
+ replica_cluster=cross-site-sharded-replica
+ desc 'create main cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
create main cluster
-----------------------------------------------------------------------------------
+ create_infra cross-site-sharded-6308
+ local ns=cross-site-sharded-6308
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.aY9FMfYLVR
++ mktemp
+ local LAST_ERR=/tmp/tmp.2nhUkecD54
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.aY9FMfYLVR
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.2nhUkecD54
+ rm /tmp/tmp.aY9FMfYLVR /tmp/tmp.2nhUkecD54
+ return 0
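Every kubectl_bin call in this log expands to the same retry scaffolding: capture stdout/stderr into mktemp files, try up to three times with a growing pause, and replay the captured output afterwards. A sketch of the wrapper as it behaves in the trace; the real guard test also checks a second operand (traced as `-n 1`) which is simplified away here:

kubectl_bin() {
	local LAST_OUT LAST_ERR
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	local exit_status=0
	local timeout=4
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" != 0 ]; then
			# Replay the failed attempt's output, then back off:
			# sleep 0, 4, 8 — the pauses visible later in this log.
			cat "$LAST_OUT"
			cat "$LAST_ERR" >&2
			sleep $((timeout * i))
		else
			break
		fi
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR" >&2
	rm "$LAST_OUT" "$LAST_ERR"
	return $exit_status
}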
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.b8v6LeB2at
++ mktemp
+ local LAST_ERR=/tmp/tmp.DplKOPiOnZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.b8v6LeB2at
+ cat /tmp/tmp.DplKOPiOnZ
+ rm /tmp/tmp.b8v6LeB2at /tmp/tmp.DplKOPiOnZ
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.hOmQoY2W07
++ mktemp
+ local LAST_ERR=/tmp/tmp.AKSQofAv7v
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hOmQoY2W07
+ cat /tmp/tmp.AKSQofAv7v
+ rm /tmp/tmp.hOmQoY2W07 /tmp/tmp.AKSQofAv7v
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.tW6rkdIC9v
++ mktemp
+ local LAST_ERR=/tmp/tmp.Mxyaovyg9T
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.tW6rkdIC9v
+ cat /tmp/tmp.Mxyaovyg9T
+ rm /tmp/tmp.tW6rkdIC9v /tmp/tmp.Mxyaovyg9T
+ return 0
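The loop just traced clears finalizers on any leftover custom resources so deletion cannot hang, then waits for each CRD to disappear. A sketch reconstructed from the trace appears below. It also explains the odd `kubectl patch ... -n sh` lines above: the CRDs are already gone, so xargs receives no input, but without -r GNU xargs still runs its command once, and inside `sh -xc` an unset $0 defaults to "sh" — hence a patch against namespace "sh", whose failure is swallowed by `|| :` (traced as `+ :`):

for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
	# Strip finalizers from every instance of the CRD in every namespace;
	# tolerate "no such resource type" errors when the CRD is already gone.
	kubectl get "$crd_name" --all-namespaces -o wide \
		| grep -v NAMESPACE \
		| xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
		|| :
	kubectl_bin wait --for=delete crd "$crd_name"
done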
+ local rbac_yaml=rbac.yaml
+ '[' -n '' ']'
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.j7BcW5NHo0
++ mktemp
+ local LAST_ERR=/tmp/tmp.dw81oM1HVW
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.j7BcW5NHo0
role.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
serviceaccount "percona-server-mongodb-operator" deleted
rolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.dw81oM1HVW
+ rm /tmp/tmp.j7BcW5NHo0 /tmp/tmp.dw81oM1HVW
+ return 0
+ check_crd_for_deletion PR-1776-54911d4c
+ local git_tag=PR-1776-54911d4c
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1776-54911d4c/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed s/---//g
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.QoGrAlXWbV
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KGg23aueR1
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.QoGrAlXWbV
++ cat /tmp/tmp.KGg23aueR1
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.QoGrAlXWbV
++ cat /tmp/tmp.KGg23aueR1
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.QoGrAlXWbV
++ cat /tmp/tmp.KGg23aueR1
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.QoGrAlXWbV
++ cat /tmp/tmp.KGg23aueR1
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.QoGrAlXWbV /tmp/tmp.KGg23aueR1
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n '' ']'
+ create_namespace cross-site-sharded-6308
+ local namespace=cross-site-sharded-6308
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
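destroy_chaos_mesh collects the names of any chaos-mesh leftovers (webhook configurations, CRDs, cluster roles and bindings) via command substitution and deletes them under a 30-second cap. With chaos-mesh not installed, each substitution is empty, so kubectl is invoked with a resource kind but no names and emits the harmless "no name was specified" errors above, which `|| :` (traced as `+ :`) swallows. A condensed sketch of the pattern; the loop over kinds is a simplification of the unrolled calls in the trace:

destroy_chaos_mesh() {
	local kind
	for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration clusterrolebinding clusterrole; do
		# An empty $(...) leaves kubectl with no names -> "no name was
		# specified"; "|| :" keeps the script alive under set -e.
		timeout 30 kubectl delete "$kind" \
			$(kubectl get "$kind" | grep chaos-mesh | awk '{print $1}') || :
	done
	timeout 30 kubectl delete crd \
		$(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
}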
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ '[' -n '' ']'
+ xargs kubectl delete ns
+ desc 'cleaned up old namespaces cross-site-sharded-6308'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces cross-site-sharded-6308
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace cross-site-sharded-6308 --ignore-not-found
++ mktemp
++ mktemp
+ awk '{print$1}'
+ local LAST_OUT=/tmp/tmp.GjG58mW01S
+ local LAST_OUT=/tmp/tmp.TvjkMl2aoK
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.AqyUAihkLK
+ local LAST_ERR=/tmp/tmp.sM19Kaa0jW
+ local exit_status=0
+ local exit_status=0
+ local timeout=4
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace cross-site-sharded-6308 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.GjG58mW01S
+ cat /tmp/tmp.sM19Kaa0jW
+ rm /tmp/tmp.GjG58mW01S /tmp/tmp.sM19Kaa0jW
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TvjkMl2aoK
+ cat /tmp/tmp.AqyUAihkLK
+ rm /tmp/tmp.TvjkMl2aoK /tmp/tmp.AqyUAihkLK
+ return 0
+ kubectl_bin wait --for=delete namespace cross-site-sharded-6308
++ mktemp
+ local LAST_OUT=/tmp/tmp.aNvj4bO90E
++ mktemp
+ local LAST_ERR=/tmp/tmp.wpsBfhifAr
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace cross-site-sharded-6308
namespace "cross-site-sharded-7706" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.aNvj4bO90E
+ cat /tmp/tmp.wpsBfhifAr
+ rm /tmp/tmp.aNvj4bO90E /tmp/tmp.wpsBfhifAr
+ return 0
+ desc 'create namespace cross-site-sharded-6308'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace cross-site-sharded-6308
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cross-site-sharded-6308
++ mktemp
+ local LAST_OUT=/tmp/tmp.zb4RARwrvb
++ mktemp
+ local LAST_ERR=/tmp/tmp.m5B4r2d44Q
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace cross-site-sharded-6308
namespace "gke-managed-cim" deleted
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zb4RARwrvb
namespace/cross-site-sharded-6308 created
+ cat /tmp/tmp.m5B4r2d44Q
+ rm /tmp/tmp.zb4RARwrvb /tmp/tmp.m5B4r2d44Q
+ return 0
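The two interleaved trace streams above come from the namespace-cleanup pipeline running concurrently with the targeted delete: the stray `namespace "cross-site-sharded-7706" deleted` and `gke-managed-*`/`gmp-*` deletions that surface mid-trace are the asynchronous output of `xargs kubectl delete ns`. A serialized sketch of create_namespace as the trace shows it, with kubectl_bin being the retry wrapper sketched earlier:

create_namespace() {
	local namespace=$1
	# Delete every namespace that is not a system one and is not already
	# Terminating; its output drains asynchronously into the log.
	kubectl_bin get ns \
		| egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
		| awk '{print $1}' \
		| xargs kubectl delete ns
	# Recreate the test namespace from scratch.
	kubectl_bin delete namespace "$namespace" --ignore-not-found
	kubectl_bin wait --for=delete namespace "$namespace"
	kubectl_bin create namespace "$namespace"
}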
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-54911d4c-5-cluster1" modified. + cat /tmp/tmp.qp9eNJLoGC + rm /tmp/tmp.6FNzYQqhIk /tmp/tmp.qp9eNJLoGC + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/cross-site-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.7EEINACQT7 ++ mktemp + local LAST_ERR=/tmp/tmp.8yrnyyPO3c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7EEINACQT7 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.8yrnyyPO3c + rm /tmp/tmp.7EEINACQT7 /tmp/tmp.8yrnyyPO3c + return 0 + '[' -n '' ']' + apply_rbac rbac + local operator_namespace=psmdb-operator + local rbac=rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/rbac.yaml + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.2eEfM73P5q ++ mktemp + local LAST_ERR=/tmp/tmp.XKtifGjsWB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2eEfM73P5q role.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.XKtifGjsWB + rm /tmp/tmp.2eEfM73P5q /tmp/tmp.XKtifGjsWB + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1776-54911d4c") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YJ1FQ7OrVV ++ mktemp + local LAST_ERR=/tmp/tmp.6c3KWTKSNB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YJ1FQ7OrVV deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.6c3KWTKSNB + rm /tmp/tmp.YJ1FQ7OrVV /tmp/tmp.6c3KWTKSNB + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vBuCpo5BH8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uK3ClKnhJn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vBuCpo5BH8 ++ cat /tmp/tmp.uK3ClKnhJn error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vBuCpo5BH8 ++ cat /tmp/tmp.uK3ClKnhJn error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.vBuCpo5BH8 ++ cat /tmp/tmp.uK3ClKnhJn error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. Printing more information for debugging the template: template was: {.items[].metadata.name} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "items":[]interface {}{}, "kind":"List", "metadata":map[string]interface {}{"resourceVersion":""}} ++ sleep 8 ++ cat /tmp/tmp.vBuCpo5BH8 ++ cat /tmp/tmp.uK3ClKnhJn error: error executing jsonpath "{.items[].metadata.name}": Error executing template: array index out of bounds: index 0, length 0. 
+ wait_pod
+ local pod=
+ set +o xtrace
waiting for pod/ to be ready........................................................................................................................................................................................................................................................................................................................................................................error: arguments in resource/name form must have a single resource and name
error: arguments in resource/name form must have a single resource and name
error: arguments in resource/name form must have a single resource and name
error: arguments in resource/name form must have a single resource and name
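The closing failure follows directly from the empty get_operator_pod result: wait_pod is invoked with no argument, `local pod=` stays empty, and the readiness poll runs against the bare resource/name form `pod/`, which kubectl rejects on every iteration. A sketch of the failure mode; the poll command inside wait_pod is an assumption, since only the "waiting for pod/ to be ready" banner and the kubectl errors are visible in the trace:

wait_pod() {
	local pod=$1    # empty here, because get_operator_pod printed nothing
	echo -n "waiting for pod/${pod} to be ready"
	# With pod="" every probe becomes `kubectl get pod/`, and kubectl
	# answers: "error: arguments in resource/name form must have a
	# single resource and name" — the repeated errors above.
	until [ "$(kubectl get "pod/${pod}" -o 'jsonpath={.status.containerStatuses[0].ready}')" == "true" ]; do
		echo -n .
		sleep 1
	done
}

A guard on an empty $pod (e.g. failing fast when $1 is unset) would surface the real problem, the operator pod never appearing, instead of polling a nonexistent resource.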