Skip to content

Commit

Permalink
Fix mypy and update docs to match options in common
Browse files Browse the repository at this point in the history
  • Loading branch information
stxue1 committed Sep 20, 2023
1 parent 5e03777 commit 4c4f205
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 18 deletions.
4 changes: 2 additions & 2 deletions docs/running/cliOptions.rst
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ from the batch system.
Only applicable to jobs that do not specify an
explicit value for this requirement. Standard suffixes
like K, Ki, M, Mi, G or Gi are supported. Default is
2.0G
2.0Gi
--defaultCores FLOAT The default number of CPU cores to dedicate a job.
Only applicable to jobs that do not specify an
explicit value for this requirement. Fractions of a
Expand All @@ -390,7 +390,7 @@ from the batch system.
Only applicable to jobs that do not specify an
explicit value for this requirement. Standard suffixes
like K, Ki, M, Mi, G or Gi are supported. Default is
2.0G
2.0Gi
--defaultAccelerators ACCELERATOR
The default amount of accelerators to request for a
job. Only applicable to jobs that do not specify an
Expand Down
32 changes: 16 additions & 16 deletions src/toil/batchSystems/kubernetes.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,26 +168,26 @@ def __init__(self, config: Config, maxCores: int, maxMemory: int, maxDisk: int)
self._apis: KubernetesBatchSystem._ApiStorageDict = {}

# Get our namespace (and our Kubernetes credentials to make sure they exist)
self.namespace = self._api('namespace')
self.namespace: str = self._api('namespace')

# Decide if we are going to mount a Kubernetes host path as the Toil
# work dir in the workers, for shared caching.
self.host_path = config.kubernetes_host_path
self.host_path: Optional[str] = config.kubernetes_host_path

# Get the service account name to use, if any.
self.service_account = config.kubernetes_service_account
self.service_account: Optional[str] = config.kubernetes_service_account

# Get how long we should wait for a pod that lands on a node to
# actually start.
self.pod_timeout = config.kubernetes_pod_timeout
self.pod_timeout: float = config.kubernetes_pod_timeout

# Get the username to mark jobs with
username = config.kubernetes_owner or self.get_default_kubernetes_owner()
# And a unique ID for the run
self.unique_id = uuid.uuid4()

# Create a prefix for jobs, starting with our username
self.job_prefix = f'{username}-toil-{self.unique_id}-'
self.job_prefix: str = f'{username}-toil-{self.unique_id}-'
# Instead of letting Kubernetes assign unique job names, we assign our
# own based on a numerical job ID. This functionality is managed by the
# BatchSystemLocalSupport.
Expand All @@ -200,17 +200,17 @@ def __init__(self, config: Config, maxCores: int, maxMemory: int, maxDisk: int)
# conformance tests. To work around this, we tag all our jobs with an
# explicit TTL that is long enough that we're sure we can deal with all
# the finished jobs before they expire.
self.finished_job_ttl = 3600 # seconds
self.finished_job_ttl: int = 3600 # seconds

# Here is where we will store the user script resource object if we get one.
self.user_script: Optional[Resource] = None

# Get the image to deploy from Toil's configuration
self.docker_image = applianceSelf()
self.docker_image: str = applianceSelf()

# Try and guess what Toil work dir the workers will use.
# We need to be able to provision (possibly shared) space there.
self.worker_work_dir = Toil.getToilWorkDir(config.workDir)
self.worker_work_dir: str = Toil.getToilWorkDir(config.workDir)
if (config.workDir is None and
os.getenv('TOIL_WORKDIR') is None and
self.worker_work_dir == tempfile.gettempdir()):
Expand All @@ -227,17 +227,17 @@ def __init__(self, config: Config, maxCores: int, maxMemory: int, maxDisk: int)
self.environment['TMPDIR'] = '/var/tmp'

# Get the name of the AWS secret, if any, to mount in containers.
self.aws_secret_name = os.environ.get("TOIL_AWS_SECRET_NAME", None)
self.aws_secret_name: Optional[str] = os.environ.get("TOIL_AWS_SECRET_NAME", None)

# Set this to True to enable the experimental wait-for-job-update code
self.enable_watching = os.environ.get("KUBE_WATCH_ENABLED", False)
self.enable_watching: bool = os.environ.get("KUBE_WATCH_ENABLED", False)

# This will be a label to select all our jobs.
self.run_id = f'toil-{self.unique_id}'
self.run_id: str = f'toil-{self.unique_id}'

# Keep track of available resources.
maxMillicores = int(SYS_MAX_SIZE if self.maxCores == SYS_MAX_SIZE else self.maxCores * 1000)
self.resource_sources = [
self.resource_sources: List[ResourcePool] = [
# A pool representing available job slots
ResourcePool(self.config.max_jobs, 'job slots'),
# A pool representing available CPU in units of millicores (1 CPU
Expand All @@ -262,16 +262,16 @@ def __init__(self, config: Config, maxCores: int, maxMemory: int, maxDisk: int)
self._killed_queue_jobs: Set[int] = set()

# We use this event to signal shutdown
self._shutting_down = Event()
self._shutting_down: Event = Event()

# A lock to protect critical regions when working with queued jobs.
self._mutex = RLock()
self._mutex: RLock = RLock()

# A condition set to true when there is more work to do. e.g.: new job
# in the queue or any resource becomes available.
self._work_available = Condition(lock=self._mutex)
self._work_available: Condition = Condition(lock=self._mutex)

self.schedulingThread = Thread(target=self._scheduler, daemon=True)
self.schedulingThread: Thread = Thread(target=self._scheduler, daemon=True)
self.schedulingThread.start()

def _pretty_print(self, kubernetes_object: Any) -> str:
Expand Down

0 comments on commit 4c4f205

Please sign in to comment.