++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/logs/split-horizon.log'
Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/logs/split-horizon.log
++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/conf/cloud-secret.yml ']'
++ SKIP_BACKUPS_TO_AWS_GCP_AZURE=
++ oc get projects
++ kubectl get nodes
++ grep '^minikube'
+++ kubectl version -o json
+++ grep '\-eks\-'
+++ jq -r .serverVersion.gitVersion
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' ']'
++ EKS=0
+++ jq -r .serverVersion.gitVersion
+++ grep gke
+++ kubectl version -o json
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' v1.26.15-gke.1300000 ']'
++ GKE=1
+++ jq -r '.serverVersion.major + "." + .serverVersion.minor'
+++ /usr/bin/sed -r 's/[^0-9.]+//g'
+++ kubectl version -o json
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ KUBE_VERSION=1.26
+ create_infra split-horizon-2859
+ local ns=split-horizon-2859
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
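For readers following along, the platform probe above reduces to the pattern below (a sketch reconstructed from the trace; the kubectl WARNING lines are only the client/server version-skew notice, not a failure):

# Classify the cluster flavor from the server gitVersion and record the
# numeric server version, as the trace does with jq/grep/sed.
git_ver=$(kubectl version -o json | jq -r .serverVersion.gitVersion)
EKS=0; GKE=0
echo "$git_ver" | grep -q -- '-eks-' && EKS=1    # EKS builds carry "-eks-"
echo "$git_ver" | grep -q gke && GKE=1           # v1.26.15-gke.1300000 -> GKE=1
KUBE_VERSION=$(kubectl version -o json \
    | jq -r '.serverVersion.major + "." + .serverVersion.minor' \
    | sed -r 's/[^0-9.]+//g')                    # -> 1.26
echo "EKS=$EKS GKE=$GKE KUBE_VERSION=$KUBE_VERSION"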
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
namespace/psmdb-operator created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1555-0d8e9429-2-cluster6" modified.
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
deployment.apps/percona-server-mongodb-operator created
waiting for pod/percona-server-mongodb-operator-f7749f8b4-glmkx to be ready.OK
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces split-horizon-2859
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace split-horizon-2859
-----------------------------------------------------------------------------------
namespace/split-horizon-2859 created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1555-0d8e9429-2-cluster6" modified.
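The "start PSMDB operator" step above amounts to server-side-applying the three CRDs, creating the RBAC objects and the operator Deployment, and blocking until the operator pod is Ready. A minimal equivalent, assuming the standard deploy/ layout of the operator repository (the harness's own wait helper is replaced here by kubectl wait):

# Sketch only: file paths and the label selector are assumptions, not taken
# from this log.
kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
kubectl -n psmdb-operator apply -f deploy/rbac.yaml -f deploy/operator.yaml
kubectl -n psmdb-operator wait pod \
    --selector=name=percona-server-mongodb-operator \
    --for=condition=Ready --timeout=120s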
secret/some-users created
secret/some-name-ssl created
secret/some-name-ssl-internal created
deployment.apps/psmdb-client created
perconaservermongodb.psmdb.percona.com/some-name created
waiting for pod/some-name-rs0-0 to be ready............OK
waiting for pod/some-name-rs0-1 to be ready............OK
waiting for pod/some-name-rs0-2 to be ready...........OK
Waiting for cluster readyness
waiting for cluster readyness
deployment.apps/psmdb-client patched
waiting for pod/psmdb-client-5f578b7f94-zq4ls to be ready.OK
-----------------------------------------------------------------------------------
configuring horizons
-----------------------------------------------------------------------------------
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
+ run_mongo 'rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ local 'command=rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vQBeQorsUD
+++ mktemp
++ local LAST_ERR=/tmp/tmp.WJQEk9TdEg
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.vQBeQorsUD
++ cat /tmp/tmp.WJQEk9TdEg
++ rm /tmp/tmp.vQBeQorsUD /tmp/tmp.WJQEk9TdEg
++ return 0
+ local client_container=psmdb-client-98486d799-ksbv5
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.TNMF4vbsXq
++ mktemp
+ local LAST_ERR=/tmp/tmp.wGxJkdI0Qg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.TNMF4vbsXq
+ cat /tmp/tmp.wGxJkdI0Qg
+ rm /tmp/tmp.TNMF4vbsXq /tmp/tmp.wGxJkdI0Qg
+ return 0
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/split-horizon/compare/horizons-3.json /tmp/tmp.to3rkZwm02/horizons-3.json
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
++ grep -v certificateNames
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.60OKX4LmKj
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.iamuPhd8ks
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.60OKX4LmKj
+++ cat /tmp/tmp.iamuPhd8ks
+++ rm /tmp/tmp.60OKX4LmKj /tmp/tmp.iamuPhd8ks
+++ return 0
++ local client_container=psmdb-client-98486d799-ksbv5
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.xKxXO6H8HH
+++ mktemp
++ local LAST_ERR=/tmp/tmp.hRRFQlbJzO
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.xKxXO6H8HH
++ cat /tmp/tmp.hRRFQlbJzO
++ rm /tmp/tmp.xKxXO6H8HH /tmp/tmp.PnbGVU7W3z
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
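The kubectl_bin pattern that recurs throughout this trace deserves one readable statement: every kubectl call is retried up to three times, with stdout and stderr captured to mktemp files that are replayed and removed afterwards. A behavioral sketch reconstructed from the xtrace, not the verbatim helper source:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        # on failure, pause and retry; on success, stop retrying
        if [ "$exit_status" != 0 ]; then sleep $((timeout * (i + 1))); else break; fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}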
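Stripped of the retry machinery, run_mongo_tls is one idiom: pipe the JS snippet into a mongo shell inside the psmdb-client pod, connect over TLS with the cluster CA plus a client key, and tolerate hostname mismatches (the horizon names are not in the certificate SANs). The essential command, using the pod name and credentials visible in this run (the helper also appends the .svc.cluster.local suffix to the last host, a quirk this sketch omits):

kubectl exec psmdb-client-98486d799-ksbv5 -- bash -c \
    'printf "db.isMaster().ismaster\n" | mongo \
        "mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz/admin?replicaSet=rs0" \
        --tls --tlsCAFile /etc/mongodb-ssl/ca.crt \
        --tlsCertificateKeyFile /tmp/tls.pem \
        --tlsAllowInvalidHostnames --quiet'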
+ run_mongo_tls 'rs.stepDown()' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
+ local 'command=rs.stepDown()'
+ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.VQmUzmMwlp
+++ mktemp
++ local LAST_ERR=/tmp/tmp.3BxmlnL1Il
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.VQmUzmMwlp
++ cat /tmp/tmp.3BxmlnL1Il
++ rm /tmp/tmp.VQmUzmMwlp /tmp/tmp.3BxmlnL1Il
++ return 0
+ local client_container=psmdb-client-98486d799-ksbv5
+ local mongo_flag=--quiet
+ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ mktemp
+ local LAST_OUT=/tmp/tmp.wJXFDoTlDn
++ mktemp
+ local LAST_ERR=/tmp/tmp.qrIIA9Qh4J
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''rs.stepDown()\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.wJXFDoTlDn
{"t":{"$date":"2024-05-20T12:42:49.031Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-20T12:42:49.043Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-20T12:42:49.046Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-20T12:42:49.047Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-20T12:42:49.053Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-20T12:42:49.053Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-0.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{"t":{"$date":"2024-05-20T12:42:49.056Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-2.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1716208962, 2), "signature" : { "hash" : BinData(0,"+Bcphk8GacRmqS45qqkbFUY87qI="), "keyId" : NumberLong("7371061016999755782") } }, "operationTime" : Timestamp(1716208962, 2) }
{"t":{"$date":"2024-05-20T12:42:49.112Z"},"s":"W", "c":"NETWORK", "id":23238, "ctx":"js","msg":"The server certificate does not match the remote host name","attr":{"remoteHost":"some-name-rs0-1.clouddemo.xyz","certificateNames":"SAN(s): localhost, some-name-rs0, *.some-name-rs0, "}}
+ cat /tmp/tmp.qrIIA9Qh4J
+ rm /tmp/tmp.wJXFDoTlDn /tmp/tmp.qrIIA9Qh4J
+ return 0
+ sleep 10
++ grep -v certificateNames
++ run_mongo_tls 'db.isMaster().ismaster' clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz mongodb '' --quiet
++ local 'command=db.isMaster().ismaster'
++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz
++ local driver=mongodb
++ local suffix=.svc.cluster.local
+++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.LDAFBuXsWb
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.gcmG9bG3HF
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.LDAFBuXsWb
+++ cat /tmp/tmp.gcmG9bG3HF
+++ rm /tmp/tmp.LDAFBuXsWb /tmp/tmp.gcmG9bG3HF
+++ return 0
++ local client_container=psmdb-client-98486d799-ksbv5
++ local mongo_flag=--quiet
++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz == *cfg* ]]
++ replica_set=rs0
++ kubectl_bin exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HdN2MPfy11
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PnbGVU7W3z
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl exec psmdb-client-98486d799-ksbv5 -- bash -c 'printf '\''db.isMaster().ismaster\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz.svc.cluster.local/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames --quiet'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 0 ']'
++ break
++ cat /tmp/tmp.HdN2MPfy11
++ cat /tmp/tmp.PnbGVU7W3z
++ rm /tmp/tmp.HdN2MPfy11 /tmp/tmp.PnbGVU7W3z
++ return 0
+ isMaster=true
+ '[' true '!=' true ']'
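The W NETWORK 23238 warnings above are expected rather than a failure: the server certificate's SANs (localhost, some-name-rs0, *.some-name-rs0) cover only the in-cluster names, not the clouddemo.xyz horizon names, which is exactly why the clients pass --tlsAllowInvalidHostnames. To inspect the presented SANs directly, something like this works (hypothetical invocation; requires OpenSSL 1.1.1+ and network reach to the horizon address):

# Dump the subjectAltName extension of the cert served on the horizon name.
echo | openssl s_client -connect some-name-rs0-0.clouddemo.xyz:27017 2>/dev/null \
    | openssl x509 -noout -ext subjectAltName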
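Before the next apply_cluster: the some-name-5horizons.yml manifest is expected to extend the replset's splitHorizons map so that five members each get an external hostname. The exact file contents are not echoed in this log; the stanza below is a hypothetical reconstruction following the operator's splitHorizons schema, kept as a shell heredoc:

# Hypothetical excerpt of some-name-5horizons.yml (names modeled on this run).
cat <<'EOF'
spec:
  replsets:
    - name: rs0
      splitHorizons:
        some-name-rs0-0:
          external: some-name-rs0-0.clouddemo.xyz
        some-name-rs0-1:
          external: some-name-rs0-1.clouddemo.xyz
        some-name-rs0-2:
          external: some-name-rs0-2.clouddemo.xyz
        some-name-rs0-3:
          external: some-name-rs0-3.clouddemo.xyz
        some-name-rs0-4:
          external: some-name-rs0-4.clouddemo.xyz
EOF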
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/split-horizon/conf/some-name-5horizons.yml
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1555-0d8e9429"'
++ mktemp
+ local LAST_OUT=/tmp/tmp.46NHvXMmgS
++ mktemp
+ local LAST_ERR=/tmp/tmp.r30SQ9gK5m
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 0 ']'
+ break
+ cat /tmp/tmp.46NHvXMmgS
perconaservermongodb.psmdb.percona.com/some-name configured
+ cat /tmp/tmp.r30SQ9gK5m
+ rm /tmp/tmp.46NHvXMmgS /tmp/tmp.r30SQ9gK5m
+ return 0
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 0 == 1 ]]
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
waiting for pod/some-name-rs0-3 to be ready...........OK
waiting for pod/some-name-rs0-4 to be ready.............OK
Waiting for cluster readyness
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com/some-name patched
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness..
waiting for cluster readyness
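The two "patched" events above bracket a scale exercise: the replset grows from three to five members (some-name-rs0-3 and some-name-rs0-4 appear) and is then shrunk back. A patch of the following shape would have that effect (hypothetical payload; the harness's actual patch is not echoed in this log):

# JSON-patch the replset size on the PerconaServerMongoDB custom resource.
kubectl patch psmdb some-name --type=json \
    -p '[{"op": "replace", "path": "/spec/replsets/0/size", "value": 5}]'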
perconaservermongodb.psmdb.percona.com/some-name configured
waiting for pod/some-name-rs0-0 to be ready.OK
waiting for pod/some-name-rs0-1 to be ready.OK
waiting for pod/some-name-rs0-2 to be ready.OK
Waiting for cluster readyness
waiting for cluster readyness
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
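Teardown note: the patch calls above strip finalizers from any leftover custom resources before the CRDs are removed, so deletion cannot hang on an operator that is already gone; in this run the resource types were already absent, hence the errors. Generalized, the per-type call looks like this ($name and $ns are placeholders for values normally read from a kubectl get across all namespaces):

for type in perconaservermongodbbackups perconaservermongodbrestores perconaservermongodbs; do
    kubectl patch "${type}.psmdb.percona.com" "$name" -n "$ns" \
        --type=merge -p '{"metadata":{"finalizers":[]}}' || true
done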