++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/logs/serviceless-external-nodes.log'
Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/logs/serviceless-external-nodes.log
++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1555/e2e-tests/conf/cloud-secret.yml ']'
++ SKIP_BACKUPS_TO_AWS_GCP_AZURE=
++ oc get projects
++ kubectl get nodes
++ grep '^minikube'
+++ kubectl version -o json
+++ jq -r .serverVersion.gitVersion
+++ grep '\-eks\-'
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' ']'
++ EKS=0
+++ jq -r .serverVersion.gitVersion
+++ kubectl version -o json
+++ grep gke
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ '[' v1.26.15-gke.1300000 ']'
++ GKE=1
+++ kubectl version -o json
+++ /usr/bin/sed -r 's/[^0-9.]+//g'
+++ jq -r '.serverVersion.major + "." + .serverVersion.minor'
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
++ KUBE_VERSION=1.26
++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0
++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0
+++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version'
+++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0
+++ local 'cli=mongod --version'
+++ local pod_name=5267
+++ kubectl_bin -n default run 5267 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity
+++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.0XPcuX77b2
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.wVGKRl1lVH
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default run 5267 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.0XPcuX77b2
+++ cat /tmp/tmp.wVGKRl1lVH
+++ rm /tmp/tmp.0XPcuX77b2 /tmp/tmp.wVGKRl1lVH
+++ return 0
+++ kubectl_bin -n default wait --for=condition=Ready pod/5267
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.Bf3VXq05He
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.VuqzEVwp1X
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default wait --for=condition=Ready pod/5267
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.Bf3VXq05He
+++ cat /tmp/tmp.VuqzEVwp1X
+++ rm /tmp/tmp.Bf3VXq05He /tmp/tmp.VuqzEVwp1X
+++ return 0
++++ kubectl_bin -n default exec 5267 -- mongod --version
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.G8CvkssGTy
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.Q5pqu4IM93
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in '$(seq 0 2)'
++++ set +e
++++ kubectl -n default exec 5267 -- mongod --version
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 0 ']'
++++ break
++++ cat /tmp/tmp.G8CvkssGTy
++++ cat /tmp/tmp.Q5pqu4IM93
++++ rm /tmp/tmp.G8CvkssGTy /tmp/tmp.Q5pqu4IM93
++++ return 0
+++ local 'output=db version v7.0.8-5
Build Info: {
    "version": "7.0.8-5",
    "gitVersion": "197ff4d49589e8b5a444b7ebc6f4d33b6f00105e",
    "openSSLVersion": "OpenSSL 1.1.1k FIPS 25 Mar 2021",
    "modules": [],
    "proFeatures": [],
    "allocator": "tcmalloc",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}'
+++ kubectl_bin -n default delete pod/5267 --grace-period=0 --force
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.sKA94Dpf1Q
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.7gUH5fSCOv
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default delete pod/5267 --grace-period=0 --force
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 0 ']'
+++ break
+++ cat /tmp/tmp.sKA94Dpf1Q
+++ cat /tmp/tmp.7gUH5fSCOv
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+++ rm /tmp/tmp.sKA94Dpf1Q /tmp/tmp.7gUH5fSCOv
+++ return 0
+++ echo db version v7.0.8-5 Build Info: '{' '"version":' '"7.0.8-5",' '"gitVersion":' '"197ff4d49589e8b5a444b7ebc6f4d33b6f00105e",' '"openSSLVersion":' '"OpenSSL' 1.1.1k FIPS 25 Mar '2021",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}'
++ version_info=7.0.8-5
++ [[ ! 7.0.8-5 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]]
++ echo 7.0.8-5
+ FULL_VER=7.0.8-5
+ MONGO_VER=7.0
+ unset OPERATOR_NS
+ desc 'Create main cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
Create main cluster
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces serviceless-external-nodes-18992
-----------------------------------------------------------------------------------
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace serviceless-external-nodes-18992
-----------------------------------------------------------------------------------
namespace/serviceless-external-nodes-18992 created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1555-0d8e9429-2-cluster2" modified.
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
role.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
deployment.apps/percona-server-mongodb-operator created
waiting for pod/percona-server-mongodb-operator-db54c86f5-kk427 to be ready.OK
deployment.apps/psmdb-client created
secret/mydb-users created
secret/mydb-encryption-key created
secret/mydb-ssl created
secret/mydb-ssl-internal created
secret/mydb-mongodb-keyfile created
perconaservermongodb.psmdb.percona.com/mydb created
waiting for pod/mydb-rs0-0 to be ready................OK
Waiting for cluster readyness.............
-----------------------------------------------------------------------------------
Start External Cluster in unmanaged mode
-----------------------------------------------------------------------------------
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1555-0d8e9429-2-cluster2" modified.
-----------------------------------------------------------------------------------
cleaned up old namespaces serviceless-external-nodes-replica-19190
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
create namespace serviceless-external-nodes-replica-19190
-----------------------------------------------------------------------------------
namespace/serviceless-external-nodes-replica-19190 created
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1555-0d8e9429-2-cluster2" modified.
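The namespace and operator bootstrap steps above reduce to a handful of kubectl calls. This is a rough sketch only: the manifest paths (deploy/crd.yaml, deploy/rbac.yaml, deploy/operator.yaml) are placeholders for whatever the test framework actually applies, while the namespace creation, the context switch (the 'Context ... modified.' lines) and the server-side apply (the 'serverside-applied' messages) follow from the output above.

namespace=serviceless-external-nodes-18992

# create the test namespace and make it the default for the current context
kubectl create namespace "$namespace"
kubectl config set-context "$(kubectl config current-context)" --namespace="$namespace"

# install the CRDs with server-side apply, then the operator RBAC and Deployment
kubectl apply --server-side -f deploy/crd.yaml
kubectl apply -n "$namespace" -f deploy/rbac.yaml
kubectl apply -n "$namespace" -f deploy/operator.yaml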
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
role.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
rolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
deployment.apps/percona-server-mongodb-operator created
waiting for pod/percona-server-mongodb-operator-db54c86f5-mv6kw to be ready.OK
deployment.apps/psmdb-client created
secret/mydb-users created
secret/mydb-encryption-key created
secret/mydb-ssl created
secret/mydb-ssl-internal created
secret/mydb-mongodb-keyfile created
perconaservermongodb.psmdb.percona.com/mydb created
waiting for pod/mydb-rs0-0 to be ready................OK
waiting for pod/mydb-rs0-1 to be ready.................OK
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1555-0d8e9429-2-cluster2" modified.
perconaservermongodb.psmdb.percona.com/mydb configured
waiting for cluster readyness
perconaservermongodb.psmdb.percona.com "mydb" deleted
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
role.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
serviceaccount "percona-server-mongodb-operator" deleted
rolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
namespace "serviceless-external-nodes-replica-19190" force deleted
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
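The "get and delete old CRDs and RBAC" sections above all follow one pattern: clear finalizers on any leftover custom resources so deletion is not blocked, then remove the CRDs themselves. The patch invocation in the sketch below is the one visible in the log; the surrounding get/xargs loop and the final CRD delete are assumptions about how the framework drives it. With GNU xargs an empty resource list still runs the command once, so $0 inside the inline sh script falls back to "sh", which would explain the "-n sh" in the patch lines above.

# clear finalizers on every leftover psmdb backup so deletion is not blocked;
# the same pattern is repeated for perconaservermongodbrestores and perconaservermongodbs
kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
    || :

# finally remove the CRDs that were installed for the test
kubectl delete crd perconaservermongodbbackups.psmdb.percona.com \
    perconaservermongodbrestores.psmdb.percona.com \
    perconaservermongodbs.psmdb.percona.com \
    || :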