++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/logs/split-horizon.log'
Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/logs/split-horizon.log
++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/conf/cloud-secret.yml ']'
++ SKIP_BACKUPS_TO_AWS_GCP_AZURE=
++ oc get projects
++ kubectl get nodes
++ grep '^minikube'
+++ grep '\-eks\-'
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
WARNING: version difference between client (1.29) and server (1.25) exceeds the supported minor version skew of +/-1
++ '[' ']'
++ EKS=0
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
+++ grep gke
WARNING: version difference between client (1.29) and server (1.25) exceeds the supported minor version skew of +/-1
++ '[' v1.25.16-gke.1268000 ']'
++ GKE=1
+++ jq -r '.serverVersion.major + "." + .serverVersion.minor'
+++ /usr/bin/sed -r 's/[^0-9.]+//g'
+++ kubectl version -o json
WARNING: version difference between client (1.29) and server (1.25) exceeds the supported minor version skew of +/-1
++ KUBE_VERSION=1.25
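The platform detection above parses kubectl version -o json with jq: an "-eks-" substring in serverVersion.gitVersion would mark EKS, a "gke" substring marks GKE (here v1.25.16-gke.1268000, hence GKE=1), and KUBE_VERSION keeps only the major.minor pair. A standalone sketch of the same logic, reconstructed from the trace (the suite's own code differs in detail):

    # Sketch, reconstructed from the trace above; not the suite's actual helpers.
    git_version=$(kubectl version -o json | jq -r .serverVersion.gitVersion)
    EKS=0; GKE=0
    case "$git_version" in *-eks-*) EKS=1 ;; esac
    case "$git_version" in *gke*)   GKE=1 ;; esac
    KUBE_VERSION=$(kubectl version -o json \
        | jq -r '.serverVersion.major + "." + .serverVersion.minor' \
        | /usr/bin/sed -r 's/[^0-9.]+//g')   # -> "1.25"
    echo "EKS=$EKS GKE=$GKE KUBE_VERSION=$KUBE_VERSION"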
+ create_infra split-horizon-8718
+ local ns=split-horizon-8718
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace

-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found

-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified

-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------

-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified

-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
namespace/psmdb-operator created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1377-eeba1539-6-cluster4" modified.

-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
deployment.apps/percona-server-mongodb-operator created
waiting for pod/percona-server-mongodb-operator-d6f9576f6-pw7wc to be ready.OK

-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified

-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------

-----------------------------------------------------------------------------------
cleaned up old namespaces split-horizon-8718
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified

-----------------------------------------------------------------------------------
create namespace split-horizon-8718
-----------------------------------------------------------------------------------
namespace/split-horizon-8718 created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1377-eeba1539-6-cluster4" modified.
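create_infra first clears leftovers from earlier runs, which is why the errors above are benign: the PSMDB CRDs were already absent, and the chaos-mesh teardown had nothing to delete. It then recreates the namespaces, server-side-applies the three CRDs, installs the RBAC objects and the operator deployment, and polls the operator pod until it is ready. With stock kubectl the same readiness wait could be written as follows (a sketch; the suite's own helper prints the dotted "waiting for pod ... to be ready.OK" progress instead):

    # Equivalent readiness wait using stock kubectl only.
    kubectl -n psmdb-operator wait deploy/percona-server-mongodb-operator \
        --for=condition=Available --timeout=300s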
secret/some-users created
secret/some-name-ssl created
secret/some-name-ssl-internal created
deployment.apps/psmdb-client created
perconaservermongodb.psmdb.percona.com/some-name created
waiting for pod/some-name-rs0-0 to be ready............OK
waiting for pod/some-name-rs0-1 to be ready..........OK
waiting for pod/some-name-rs0-2 to be ready..........OK
Waiting for cluster readyness
waiting for cluster readyness
deployment.apps/psmdb-client patched
waiting for pod/psmdb-client-5bc8cdd6cf-pptxh to be ready.OK

-----------------------------------------------------------------------------------
configuring horizons
-----------------------------------------------------------------------------------
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
+ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ run_mongo 'rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.egPJsPzYzm
+++ mktemp
++ local LAST_ERR=/tmp/tmp.chKOCAxdd5
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.egPJsPzYzm
++ cat /tmp/tmp.chKOCAxdd5
++ rm /tmp/tmp.egPJsPzYzm /tmp/tmp.chKOCAxdd5
++ return 0
+ local client_container=psmdb-client-5bc8cdd6cf-pptxh
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.RiAlZka5gL
++ mktemp
+ local LAST_ERR=/tmp/tmp.0zyZgDxFFa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.RiAlZka5gL
+ cat /tmp/tmp.0zyZgDxFFa
+ rm /tmp/tmp.RiAlZka5gL /tmp/tmp.0zyZgDxFFa
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/split-horizon/compare/horizons-3.json /tmp/tmp.3UKO50fxOr/horizons-3.json
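Every kubectl call in the suite goes through the kubectl_bin wrapper whose trace repeats throughout this log: stdout and stderr are captured into mktemp files, the command is attempted up to three times with set +e, success breaks the loop, and the captured streams are replayed with cat before the temp files are removed. Condensed into a standalone function (a reconstruction from the trace, not the suite's exact source):

    # Sketch of the kubectl_bin retry pattern visible in the trace.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" = 0 ] && break
            sleep "$timeout"
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }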
++ grep -v certificateNames
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.IrIfVywXAe
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.UPBsOUjUP8
+++ local exit_status=0
+++ local timeout=4
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.IrIfVywXAe
+++ cat /tmp/tmp.UPBsOUjUP8
+++ rm /tmp/tmp.IrIfVywXAe /tmp/tmp.UPBsOUjUP8
+++ return 0
++ local client_container=psmdb-client-5bc8cdd6cf-pptxh
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.0A3Pc77Xyk
+++ mktemp
++ local LAST_ERR=/tmp/tmp.VeyBuJjORP
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.0A3Pc77Xyk
++ cat /tmp/tmp.VeyBuJjORP
++ rm /tmp/tmp.0A3Pc77Xyk /tmp/tmp.VeyBuJjORP
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
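run_mongo_tls execs into the psmdb-client pod and drives the mongo shell over TLS. The members are addressed by their external horizon names, so --tlsAllowInvalidHostnames is required (the server certificate lists only the internal SANs; see the warnings further down). The test first confirms that db.isMaster().ismaster is true, then asks the primary to step down. Flattened out of the kubectl exec wrapper (a sketch; the suite additionally greps the W NETWORK noise out of the captured output):

    # Runs inside the psmdb-client pod; credentials are the test's fixtures.
    uri='mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz/admin?replicaSet=rs0'
    tls='--tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
    # Drop certificate warnings, keep the evaluated result on the last line.
    isMaster=$(echo 'db.isMaster().ismaster' | mongo "$uri" $tls | grep -v certificateNames | tail -n 1)
    if [ "$isMaster" = "true" ]; then
        echo 'rs.stepDown()' | mongo "$uri" $tls
    fi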
+ run_mongo_tls 'rs.stepDown()' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.stepDown()'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.5TV0iJA7tj
+++ mktemp
++ local LAST_ERR=/tmp/tmp.iwtoGxv3vo
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.5TV0iJA7tj
++ cat /tmp/tmp.iwtoGxv3vo
++ rm /tmp/tmp.5TV0iJA7tj /tmp/tmp.iwtoGxv3vo
++ return 0
+ local client_container=psmdb-client-5bc8cdd6cf-pptxh
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.Rtil0WudbM
++ mktemp
+ local LAST_ERR=/tmp/tmp.TdWQSFb9qW
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.Rtil0WudbM
{"t":{"$date":"2024-01-27T17:15:29.241Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-01-27T17:15:29.243Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-01-27T17:15:29.248Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-01-27T17:15:29.255Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-01-27T17:15:29.260Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-01-27T17:15:29.260Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-01-27T17:15:29.265Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1706375727, 1), "signature" : { "hash" : BinData(0,"q0szSMlzhiSHW6CQfr0YLAv17Fs="), "keyId" : NumberLong("7328827512656494597") } }, "operationTime" : Timestamp(1706375727, 1) }
{"t":{"$date":"2024-01-27T17:15:29.314Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
+ cat /tmp/tmp.TdWQSFb9qW
+ rm /tmp/tmp.Rtil0WudbM /tmp/tmp.TdWQSFb9qW
+ return 0
+ sleep 10
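The W NETWORK (id 23238) warnings above are expected here: the shell dials the external horizon names under *.clouddemo.xyz, while the server certificate's SANs cover only localhost, some-name-rs0 and *.some-name-rs0. Hostname verification therefore fails, which is exactly why the connection runs with --tlsAllowInvalidHostnames and why the output filters grep the warnings away before comparison. One way to confirm which names a serving certificate actually carries (illustrative; assumes the horizon address is reachable and the port serves TLS):

    # Print the SANs of the certificate served on the external horizon address.
    echo | openssl s_client -connect some-name-rs0-0.clouddemo.xyz:27017 2>/dev/null \
        | openssl x509 -noout -ext subjectAltName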
++ grep -v certificateNames
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.3dAZoj191S
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.Cd0HaGO7S4
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.3dAZoj191S
+++ cat /tmp/tmp.Cd0HaGO7S4
+++ rm /tmp/tmp.3dAZoj191S /tmp/tmp.Cd0HaGO7S4
+++ return 0
++ local client_container=psmdb-client-5bc8cdd6cf-pptxh
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ABwzb27MZo
+++ mktemp
++ local LAST_ERR=/tmp/tmp.6JpX8dHVaO
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-5bc8cdd6cf-pptxh -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.ABwzb27MZo
++ cat /tmp/tmp.6JpX8dHVaO
++ rm /tmp/tmp.ABwzb27MZo /tmp/tmp.6JpX8dHVaO
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/split-horizon/conf/some-name-5horizons.yml
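rs.stepDown() returned { "ok" : 1 }, the script slept 10 seconds, and the isMaster probe was repeated through the same multi-host URI; because the shell is given replicaSet=rs0 it rediscovers whichever member is now primary, so the check again yields true and the five-horizon variant of the custom resource is applied. At the replica-set level, a horizon is per-member configuration mapping a horizon name to the address clients on that horizon should see. In plain MongoDB terms it looks roughly like this (a sketch of the mechanism the operator manages for you, reusing the $uri and $tls shorthand from the earlier sketch; not the operator's code):

    # Set an "external" horizon on every member via replica-set reconfig.
    echo 'cfg = rs.conf();
          cfg.members.forEach(function (m, i) {
              m.horizons = { external: "some-name-rs0-" + i + ".clouddemo.xyz:27017" };
          });
          rs.reconfig(cfg);' | mongo "$uri" $tls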
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ kubectl_bin apply -f -
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1377-eeba1539"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
++ mktemp
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod5.0"'
+ local LAST_OUT=/tmp/tmp.LtVlorFhPE
++ mktemp
+ local LAST_ERR=/tmp/tmp.62U6OXvy4P
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.LtVlorFhPE
perconaservermongodb.psmdb.percona.com/some-name configured
+ cat /tmp/tmp.62U6OXvy4P
+ rm /tmp/tmp.LtVlorFhPE /tmp/tmp.62U6OXvy4P
+ return 0
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 0 == 1 ]]
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
waiting for pod/some-name-rs0-3 to be ready.........OK
waiting for pod/some-name-rs0-4 to be ready..........OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
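apply_cluster (via cat_config) pipes the manifest through a chain of yq eval edits that pin every image to the build under test and set upgradeOptions.apply to Never, then feeds the result to kubectl apply -f -; the patch/wait cycles above then scale the replica set from three to five members and back, waiting for pod and cluster readiness at each step. The pipeline, reassembled as one chain (the helper applies the same expressions, though not necessarily in this order):

    # The cat_config image-override pipeline from the trace, as a single chain.
    cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1377/e2e-tests/split-horizon/conf/some-name-5horizons.yml \
      | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod5.0"' \
      | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1377-eeba1539"' \
      | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
      | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
      | yq eval '.spec.upgradeOptions.apply="Never"' \
      | kubectl apply -f -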
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------

-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
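The patch-then-delete sequence in the teardown exists because deleting a CRD while custom resources still carry finalizers can hang; the cleanup therefore strips finalizers before removing the definitions. In this run the CRDs were deleted first, so the patch calls only report the missing resource types. On a cluster where the resources still exist, the equivalent would be (a sketch; "$ns" stands for whatever namespace still holds PSMDB custom resources):

    # Strip finalizers so CR deletion cannot hang, then remove the CRD.
    kubectl patch perconaservermongodbs.psmdb.percona.com --all -n "$ns" \
        --type=merge -p '{"metadata":{"finalizers":[]}}'
    kubectl delete crd perconaservermongodbs.psmdb.percona.com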