++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/logs/split-horizon.log'
Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/logs/split-horizon.log
++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/conf/cloud-secret.yml ']'
++ SKIP_BACKUPS_TO_AWS_GCP_AZURE=
++ oc get projects
++ kubectl get nodes
++ grep '^minikube'
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
+++ grep '\-eks\-'
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' ']'
++ EKS=0
+++ kubectl version -o json
+++ grep gke
+++ jq -r .serverVersion.gitVersion
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' v1.26.15-gke.1243000 ']'
++ GKE=1
+++ kubectl version -o json
+++ jq -r '.serverVersion.major + "." + .serverVersion.minor'
+++ /usr/bin/sed -r 's/[^0-9.]+//g'
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ KUBE_VERSION=1.26
+ create_infra split-horizon-25197
+ local ns=split-horizon-25197
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
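Every "error: the server doesn't have a resource type ..." above is expected on a freshly prepared cluster: delete_crd defensively clears finalizers on any leftover PSMDB custom resources before removing the CRDs, and in this run nothing was installed yet. A minimal sketch of the same idiom, with an illustrative resource name and namespace:

    # Drop finalizers on a stuck CR so its deletion cannot hang on the
    # operator being gone, then remove the CRD (names are illustrative)
    kubectl patch perconaservermongodbs.psmdb.percona.com some-name \
        -n split-horizon-25197 --type=merge -p '{"metadata":{"finalizers":[]}}'
    kubectl delete crd perconaservermongodbs.psmdb.percona.com --ignore-not-found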
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
namespace/psmdb-operator created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1543-c8a64a78-9-cluster8" modified.
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
deployment.apps/percona-server-mongodb-operator created
waiting for pod/percona-server-mongodb-operator-fdfdd955d-4fxq9 to be ready.OK
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces split-horizon-25197
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace split-horizon-25197
-----------------------------------------------------------------------------------
namespace/split-horizon-25197 created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1543-c8a64a78-9-cluster8" modified.
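The serverside-applied suffix shows the CRDs were installed with server-side apply, which avoids the size limit on the last-applied-configuration annotation that large CRDs can hit with client-side kubectl apply. The "waiting for pod/... to be ready.OK" poll that follows can be approximated with kubectl alone; a sketch, assuming the deployment and namespace names from this run (the CRD path is illustrative):

    # Install the CRDs with server-side apply
    kubectl apply --server-side -f deploy/crd.yaml
    # Block until the operator deployment reports Available
    kubectl -n psmdb-operator wait deployment/percona-server-mongodb-operator \
        --for=condition=Available --timeout=120s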
secret/some-users created
secret/some-name-ssl created
secret/some-name-ssl-internal created
deployment.apps/psmdb-client created
perconaservermongodb.psmdb.percona.com/some-name created
waiting for pod/some-name-rs0-0 to be ready..............OK
waiting for pod/some-name-rs0-1 to be ready............OK
waiting for pod/some-name-rs0-2 to be ready............OK
Waiting for cluster readyness
waiting for cluster readyness
deployment.apps/psmdb-client patched
waiting for pod/psmdb-client-5649c5b97f-pht52 to be ready.OK
-----------------------------------------------------------------------------------
configuring horizons
-----------------------------------------------------------------------------------
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
+ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ run_mongo 'rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.YrZHPRmlKh
+++ mktemp
++ local LAST_ERR=/tmp/tmp.9RvMKO2u84
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.YrZHPRmlKh
++ cat /tmp/tmp.9RvMKO2u84
++ rm /tmp/tmp.YrZHPRmlKh /tmp/tmp.9RvMKO2u84
++ return 0
+ local client_container=psmdb-client-5649c5b97f-pht52
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.3xMa0RPNDH
++ mktemp
+ local LAST_ERR=/tmp/tmp.MBPU8pJYeA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.3xMa0RPNDH
+ cat /tmp/tmp.MBPU8pJYeA
+ rm /tmp/tmp.3xMa0RPNDH /tmp/tmp.MBPU8pJYeA
+ return 0
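Split-horizon DNS lets the same replica set advertise different member hostnames depending on which address a client connects through; the CR change applied in "configuring horizons" adds an external horizon on clouddemo.xyz, and this step reads the mapping back out of rs.conf(). run_mongo wraps the call in retry-and-tempfile scaffolding (kubectl_bin), but stripped down it is one pipeline, roughly:

    # Print each member's horizon mapping, sorted by external hostname
    kubectl exec deploy/psmdb-client -- bash -c \
        'echo "rs.conf().members.map(m => m.horizons).sort((a, b) => a.external.localeCompare(b.external))" | \
         mongo "mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz/admin?ssl=false&replicaSet=rs0" --quiet'

Note that in the trace above the helper's default .svc.cluster.local suffix gets tacked onto the last external hostname in the URI; that looks wrong but is apparently harmless here, since the command returns 0 and the diff that follows passes. The output is compared against compare/horizons-3.json, so any drift in the horizon configuration fails the test.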
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/split-horizon/compare/horizons-3.json /tmp/tmp.JoI1SpQyGp/horizons-3.json
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ grep -v certificateNames
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.uk8oJSAKW9
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.U6otd3fICw
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.uk8oJSAKW9
+++ cat /tmp/tmp.U6otd3fICw
+++ rm /tmp/tmp.uk8oJSAKW9 /tmp/tmp.U6otd3fICw
+++ return 0
++ local client_container=psmdb-client-5649c5b97f-pht52
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Dinqyfvrqg
+++ mktemp
++ local LAST_ERR=/tmp/tmp.6TQKJUOkAM
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.Dinqyfvrqg
++ cat /tmp/tmp.6TQKJUOkAM
++ rm /tmp/tmp.Dinqyfvrqg /tmp/tmp.6TQKJUOkAM
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
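The TLS variant of the probe matters here: the connection targets the external horizon names, but the server certificate only carries SAN(s) localhost, some-name-rs0, *.some-name-rs0, so the shell must be told to tolerate the hostname mismatch while still validating the chain against the CA. Reduced to a single command, the primary check is roughly:

    # Ask one horizon address whether it is currently primary; the cert is
    # still verified against the CA, only hostname verification is skipped
    kubectl exec deploy/psmdb-client -- bash -c \
        'echo "db.isMaster().ismaster" | mongo \
         "mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz/admin?replicaSet=rs0" \
         --tls --tlsCAFile /etc/mongodb-ssl/ca.crt \
         --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'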
+ run_mongo_tls 'rs.stepDown()' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.stepDown()'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.wmZHdcrugM
+++ mktemp
++ local LAST_ERR=/tmp/tmp.NVEVLjaSzV
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.wmZHdcrugM
++ cat /tmp/tmp.NVEVLjaSzV
++ rm /tmp/tmp.wmZHdcrugM /tmp/tmp.NVEVLjaSzV
++ return 0
+ local client_container=psmdb-client-5649c5b97f-pht52
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.56y96iRiFb
++ mktemp
+ local LAST_ERR=/tmp/tmp.vSsK3HfCHX
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.56y96iRiFb
{"t":{"$date":"2024-05-07T12:10:55.530Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-07T12:10:55.532Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-07T12:10:55.537Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-07T12:10:55.546Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-07T12:10:55.550Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-07T12:10:55.550Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-07T12:10:55.555Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1715083853, 2), "signature" : { "hash" : BinData(0,"e4SzuLvZjxlGz0460CIiU61Era0="), "keyId" : NumberLong("7366228676280582151") } }, "operationTime" : Timestamp(1715083853, 2) }
{"t":{"$date":"2024-05-07T12:10:55.599Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
+ cat /tmp/tmp.vSsK3HfCHX
+ rm /tmp/tmp.56y96iRiFb /tmp/tmp.vSsK3HfCHX
+ return 0
+ sleep 10
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
++ grep -v certificateNames
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.cxqckOXKeT
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.fZ0cQCTmmv
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.cxqckOXKeT
+++ cat /tmp/tmp.fZ0cQCTmmv
+++ rm /tmp/tmp.cxqckOXKeT /tmp/tmp.fZ0cQCTmmv
+++ return 0
++ local client_container=psmdb-client-5649c5b97f-pht52
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.DdEJcMsVB8
+++ mktemp
++ local LAST_ERR=/tmp/tmp.a3VByIFnNa
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-5649c5b97f-pht52 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.DdEJcMsVB8
++ cat /tmp/tmp.a3VByIFnNa
++ rm /tmp/tmp.DdEJcMsVB8 /tmp/tmp.a3VByIFnNa
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
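rs.stepDown() makes the current primary resign (for 60 seconds by default); the test then sleeps 10 s and re-runs the isMaster probe through the horizon addresses to confirm that a primary still answers there after the election. The wall of W NETWORK id 23238 warnings is the expected side effect of the SAN mismatch noted earlier; the egrep/grep filters strip them from the captured output. To see exactly which SANs a member presents on its external address, something like this works (requires OpenSSL 1.1.1+ for -ext; host and port are this run's):

    # Dump the subjectAltName extension of the certificate served on the
    # external horizon address
    echo | openssl s_client -connect some-name-rs0-0.clouddemo.xyz:27017 2>/dev/null |
        openssl x509 -noout -ext subjectAltName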
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ '[' -z '' ']'
+ kubectl_bin apply -f -
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/split-horizon/conf/some-name-5horizons.yml
++ mktemp
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1543/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ local LAST_OUT=/tmp/tmp.Dis00LGkGv
++ mktemp
+ local LAST_ERR=/tmp/tmp.xogRoSbO6l
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1543-c8a64a78"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.Dis00LGkGv
perconaservermongodb.psmdb.percona.com/some-name configured
+ cat /tmp/tmp.xogRoSbO6l
+ rm /tmp/tmp.Dis00LGkGv /tmp/tmp.xogRoSbO6l
+ return 0
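apply_cluster never applies the conf file verbatim: cat_config pushes it through the chain of yq eval edits above, injecting this build's images and pinning upgradeOptions.apply to Never before the result reaches kubectl apply -f -. Flattened into one standalone pipeline with two of the expressions from this run (the relative path is illustrative):

    # Render the CR with this build's images, then apply it
    yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
        e2e-tests/split-horizon/conf/some-name-5horizons.yml |
      yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1543-c8a64a78"' - |
      yq eval '.spec.upgradeOptions.apply="Never"' - |
      kubectl apply -f -

From here the test scales rs0 from three to five members (some-name-rs0-3 and -4 come up below) and back again, re-checking readiness after each patch, before tearing everything down.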
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 0 == 1 ]]
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
waiting for pod/some-name-rs0-3 to be ready..........OK
waiting for pod/some-name-rs0-4 to be ready...........OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness..
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted