++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/logs/split-horizon.log'
Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/logs/split-horizon.log
++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/conf/cloud-secret.yml ']'
++ SKIP_BACKUPS_TO_AWS_GCP_AZURE=
++ oc get projects
++ kubectl get nodes
++ grep '^minikube'
+++ grep '\-eks\-'
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' ']'
++ EKS=0
+++ kubectl version -o json
+++ grep gke
+++ jq -r .serverVersion.gitVersion
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' v1.26.15-gke.1320000 ']'
++ GKE=1
+++ kubectl version -o json
+++ jq -r '.serverVersion.major + "." + .serverVersion.minor'
+++ /usr/bin/sed -r 's/[^0-9.]+//g'
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ KUBE_VERSION=1.26
+ create_infra split-horizon-19772
+ local ns=split-horizon-19772
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
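Aside for readers reproducing this setup by hand: the platform probing above reduces to three kubectl/jq pipelines. A minimal standalone sketch (variable names taken from the trace; note that "kubectl version -o json" emits the skew WARNING on stderr, which is why stdout stays valid JSON for jq):

    #!/bin/bash
    # Detect the Kubernetes flavor and minor version the way the harness does.
    # The client/server skew WARNING goes to stderr; discarding stderr keeps
    # stdout as clean JSON for jq.
    git_version=$(kubectl version -o json 2>/dev/null | jq -r .serverVersion.gitVersion)

    EKS=0; GKE=0
    case "$git_version" in *-eks-*) EKS=1 ;; esac
    case "$git_version" in *gke*)   GKE=1 ;; esac

    KUBE_VERSION=$(kubectl version -o json 2>/dev/null \
        | jq -r '.serverVersion.major + "." + .serverVersion.minor' \
        | sed -r 's/[^0-9.]+//g')   # some platforms report minors like "26+"; sed strips the suffix

    echo "EKS=$EKS GKE=$GKE KUBE_VERSION=$KUBE_VERSION"   # in this run: EKS=0 GKE=1 KUBE_VERSION=1.26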
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
namespace/psmdb-operator created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1556-43640f06-2-cluster1" modified.
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
deployment.apps/percona-server-mongodb-operator created
waiting for pod/percona-server-mongodb-operator-5b96847cc9-rxpqm to be ready.OK
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces split-horizon-19772
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace split-horizon-19772
-----------------------------------------------------------------------------------
namespace/split-horizon-19772 created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1556-43640f06-2-cluster1" modified.
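The "waiting for pod/... to be ready.OK" line above comes from the harness's own polling helper. With stock kubectl, an equivalent way to block until the operator deployment is fully rolled out would be (a sketch, not what the harness itself runs):

    # Block until the operator deployment is available; functionally equivalent
    # to the harness's per-pod readiness polling.
    kubectl -n psmdb-operator rollout status deployment/percona-server-mongodb-operator --timeout=300s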
secret/some-users created
secret/some-name-ssl created
secret/some-name-ssl-internal created
deployment.apps/psmdb-client created
perconaservermongodb.psmdb.percona.com/some-name created
waiting for pod/some-name-rs0-0 to be ready.............OK
waiting for pod/some-name-rs0-1 to be ready............OK
waiting for pod/some-name-rs0-2 to be ready...........OK
Waiting for cluster readyness
waiting for cluster readyness
deployment.apps/psmdb-client patched
waiting for pod/psmdb-client-568d7cf949-hvnss to be ready.OK
-----------------------------------------------------------------------------------
configuring horizons
-----------------------------------------------------------------------------------
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
+ run_mongo 'rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.bweEguRiN4
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Jyu47NTASs
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.bweEguRiN4
++ cat /tmp/tmp.Jyu47NTASs
++ rm /tmp/tmp.bweEguRiN4 /tmp/tmp.Jyu47NTASs
++ return 0
+ local client_container=psmdb-client-568d7cf949-hvnss
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
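The kubectl_bin wrapper that dominates this trace retries each kubectl invocation up to three times, teeing stdout/stderr into mktemp files and replaying them on exit. A reconstruction from the trace above (the retry branch never fires in this passing run, so the back-off between attempts is our assumption):

    # Reconstruction of the kubectl_bin retry wrapper visible in the trace.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 -a -n "$i" ]; then
                sleep "$timeout"   # back-off between retries: inferred, not visible above
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }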
+ kubectl_bin exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.1pQjBPsAXk
++ mktemp
+ local LAST_ERR=/tmp/tmp.Q60o7LitV4
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.1pQjBPsAXk
+ cat /tmp/tmp.Q60o7LitV4
+ rm /tmp/tmp.1pQjBPsAXk /tmp/tmp.Q60o7LitV4
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/split-horizon/compare/horizons-3.json /tmp/tmp.RwiScL1ROy/horizons-3.json
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
++ grep -v certificateNames
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.Uu4h8ZnCkw
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.wQcVxCFSBB
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.Uu4h8ZnCkw
+++ cat /tmp/tmp.wQcVxCFSBB
+++ rm /tmp/tmp.Uu4h8ZnCkw /tmp/tmp.wQcVxCFSBB
+++ return 0
++ local client_container=psmdb-client-568d7cf949-hvnss
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.FltOo6jTPA
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ERARHzQlgI
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.FltOo6jTPA
++ cat /tmp/tmp.ERARHzQlgI
++ rm /tmp/tmp.FltOo6jTPA /tmp/tmp.ERARHzQlgI
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
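Stripped of the wrapper plumbing, the TLS health check above is a single exec. The essential invocation (pod name and credentials as in this run):

    # The TLS isMaster probe from the trace as one command. The client keypair
    # at /tmp/tls.pem is pre-staged in the psmdb-client pod by the test;
    # --tlsAllowInvalidHostnames is required because the server certificate's
    # SANs do not cover the external clouddemo.xyz horizon names (see the
    # id 23238 warnings further down).
    kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c \
        'echo "db.isMaster().ismaster" | mongo \
            "mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz/admin?replicaSet=rs0" \
            --tls --tlsCAFile /etc/mongodb-ssl/ca.crt \
            --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'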
+ run_mongo_tls 'rs.stepDown()' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.stepDown()'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.zRHduJVDnd
+++ mktemp
++ local LAST_ERR=/tmp/tmp.2qmsTKXF6P
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.zRHduJVDnd
++ cat /tmp/tmp.2qmsTKXF6P
++ rm /tmp/tmp.zRHduJVDnd /tmp/tmp.2qmsTKXF6P
++ return 0
+ local client_container=psmdb-client-568d7cf949-hvnss
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.Ar24wnZJEf
++ mktemp
+ local LAST_ERR=/tmp/tmp.oFD01VbF53
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.Ar24wnZJEf
{"t":{"$date":"2024-05-24T16:11:19.860Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-24T16:11:19.862Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-24T16:11:19.867Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-24T16:11:19.888Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-24T16:11:19.891Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-24T16:11:19.891Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-24T16:11:19.897Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1716567077, 2), "signature" : { "hash" : BinData(0,"rzgs8hQtrOw76wOpzuSr0s3Ifto="), "keyId" : NumberLong("7372599074853224455") } }, "operationTime" : Timestamp(1716567077, 2) }
{"t":{"$date":"2024-05-24T16:11:19.923Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
+ cat /tmp/tmp.oFD01VbF53
+ rm /tmp/tmp.Ar24wnZJEf /tmp/tmp.oFD01VbF53
+ return 0
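The repeated id 23238 warnings during rs.stepDown() are expected here: the server certificate's SANs (localhost, some-name-rs0, *.some-name-rs0) cover only the in-cluster service names, not the external clouddemo.xyz horizon hostnames, and --tlsAllowInvalidHostnames downgrades the mismatch to a warning. To inspect the presented SAN list directly, one could run something like the probe below (our suggestion, not part of the test):

    # Dump the SANs the mongod presents on its external hostname.
    kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c \
        'openssl s_client -connect some-name-rs0-0.clouddemo.xyz:27017 </dev/null 2>/dev/null \
         | openssl x509 -noout -text | grep -A1 "Subject Alternative Name"'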
+ sleep 10
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ grep -v certificateNames
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.qGw8EzvzgD
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.p54X19SRuI
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.qGw8EzvzgD
+++ cat /tmp/tmp.p54X19SRuI
+++ rm /tmp/tmp.qGw8EzvzgD /tmp/tmp.p54X19SRuI
+++ return 0
++ local client_container=psmdb-client-568d7cf949-hvnss
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.JGLg9jdzqK
+++ mktemp
++ local LAST_ERR=/tmp/tmp.zvEUqpRlSt
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.JGLg9jdzqK
++ cat /tmp/tmp.zvEUqpRlSt
++ rm /tmp/tmp.JGLg9jdzqK /tmp/tmp.zvEUqpRlSt
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
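After rs.stepDown() the test sleeps a flat 10 seconds and then re-asserts that the replicaSet=rs0 connection reaches a writable primary (isMaster=true above means a new primary was elected). A polling variant that avoids the fixed sleep (our variant, not the harness's; it reuses the harness's own grep -v certificateNames filter to drop the SAN warnings from the shell output):

    # Poll until the replica set again reports a primary instead of sleeping 10s.
    until [ "$(kubectl exec psmdb-client-568d7cf949-hvnss -- bash -c \
        'echo "db.isMaster().ismaster" | mongo \
            "mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz/admin?replicaSet=rs0" \
            --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem \
            --tlsAllowInvalidHostnames --quiet' 2>/dev/null \
        | grep -v certificateNames | tail -n1)" = "true" ]; do
        sleep 2
    done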
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1556/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1556-43640f06"'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.mRgiDCtyty
++ mktemp
+ local LAST_ERR=/tmp/tmp.CDcNIUZx9d
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.mRgiDCtyty
perconaservermongodb.psmdb.percona.com/some-name configured
+ cat /tmp/tmp.CDcNIUZx9d
+ rm /tmp/tmp.mRgiDCtyty /tmp/tmp.CDcNIUZx9d
+ return 0
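The cat_config pipeline recorded above rewrites the CR manifest's image fields before it reaches kubectl apply. As a standalone command (paths relative to the repo checkout; image tags as pinned in this run):

    # The cat_config | kubectl apply pipeline as one shell command.
    cat e2e-tests/split-horizon/conf/some-name-5horizons.yml \
        | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
        | yq eval '.spec.upgradeOptions.apply="Never"' \
        | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
        | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
        | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1556-43640f06"' \
        | kubectl apply -f -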
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 0 == 1 ]]
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
waiting for pod/some-name-rs0-3 to be ready...........OK
waiting for pod/some-name-rs0-4 to be ready............OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness.
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
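A closing note on the teardown: the kubectl patch calls with '{"metadata":{"finalizers":[]}}' exist so that CR deletion cannot hang on finalizers once the operator is gone; the "doesn't have a resource type" errors are harmless here because the CRDs were already deleted before the patches ran. A generalized form of that cleanup (the loop and the --all flag are ours; the harness patches each kind explicitly):

    # Clear finalizers on any leftover PSMDB custom resources before deleting
    # CRDs, so deletion cannot block on a finalizer the departed operator
    # would otherwise have handled.
    for kind in perconaservermongodbbackups perconaservermongodbrestores perconaservermongodbs; do
        kubectl patch "$kind.psmdb.percona.com" --all --type=merge \
            -p '{"metadata":{"finalizers":[]}}' || true
    done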