feat(runtime/kubernetes): Proper load balancing of k8s
We can load-balance k8s based on the number of pending pods:
use a cluster with 0 pending pods, or the one with the fewest.

Signed-off-by: Denys Fedoryshchenko <[email protected]>
nuclearcat committed Nov 25, 2024
1 parent e86fe6b commit 23be022
Showing 1 changed file with 28 additions and 2 deletions.
30 changes: 28 additions & 2 deletions kernelci/runtime/kubernetes.py
@@ -45,10 +45,36 @@ def generate(self, job, params):
         params['k8s_job_name'] = k8s_job_name
         return template.render(params)
 
+    def _get_clusters_load(self):
+        """Get the load of all clusters (number of pods in Pending state)"""
+        core_v1 = kubernetes.client.CoreV1Api()
+        load = {}
+        for context in self.config.context:
+            kubernetes.config.load_kube_config(context=context)
+            try:
+                pods = core_v1.list_namespaced_pod(namespace='default')
+            except kubernetes.client.rest.ApiException as error:
+                print(f'Error listing pods in {context}: {error}')
+                continue
+            load[context] = len([pod for pod in pods.items
+                                 if pod.status.phase == 'Pending'])
+            # if no pod is pending, return the context, no need to load-balance
+            # we have idle clusters
+            if load[context] == 0:
+                return {context: 0}
+        if not load:
+            # Workaround for no pods found in any context
+            print('No pods found in any context')
+            return {self.config.context[0]: 0}
+        return load
+
     def submit(self, job_path):
-        # if context is array, pick any random context to load-balance
+        # if context is array, we have multiple k8s build clusters
+        # TBD: Implement caching to not check load for each job?
         if isinstance(self.config.context, list):
-            self.kcontext = random.choice(self.config.context)
+            # get the cluster with the least load
+            load = self._get_clusters_load()
+            self.kcontext = min(load, key=load.get)
         else:
             self.kcontext = self.config.context
         kubernetes.config.load_kube_config(context=self.kcontext)
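Below is a minimal standalone sketch (not part of the commit) of the selection step applied to the load map returned by _get_clusters_load(). The pick_context() helper and the context names are hypothetical, used only to illustrate how min(load, key=load.get) picks the least-loaded cluster.

def pick_context(load):
    """Return the kube context with the fewest pending pods."""
    # min() with key=load.get compares contexts by their pending-pod counts,
    # mirroring self.kcontext = min(load, key=load.get) in submit().
    return min(load, key=load.get)

# Example with two hypothetical build clusters: 'aks-eu' has fewer pending
# pods, so it is picked.
print(pick_context({'aks-eu': 2, 'gke-us': 7}))  # -> aks-eu

# _get_clusters_load() returns early with a single entry such as {'gke-us': 0}
# when it finds an idle cluster, and min() then trivially selects it.
print(pick_context({'gke-us': 0}))  # -> gke-us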
