From 93bbd49b810045e07991510af952ede0204840b6 Mon Sep 17 00:00:00 2001
From: Danny McCormick
Date: Mon, 23 Sep 2024 11:55:47 -0400
Subject: [PATCH] lint

---
 sdks/python/apache_beam/ml/inference/vllm_inference.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sdks/python/apache_beam/ml/inference/vllm_inference.py b/sdks/python/apache_beam/ml/inference/vllm_inference.py
index f9c246ab47d3..929b6c945b4d 100644
--- a/sdks/python/apache_beam/ml/inference/vllm_inference.py
+++ b/sdks/python/apache_beam/ml/inference/vllm_inference.py
@@ -100,7 +100,7 @@ def __init__(self, model_name: str, vllm_server_kwargs: Dict[str, str]):
     self._vllm_server_kwargs = vllm_server_kwargs
     self._server_started = False
     self._server_process = None
-    self._server_port = None
+    self._server_port: int = -1
     self.start_server()
@@ -218,7 +218,7 @@ def __init__(
       self,
       model_name: str,
       chat_template_path: Optional[str] = None,
-      vllm_server_kwargs: Dict[str, str] = None):
+      vllm_server_kwargs: Optional[Dict[str, str]] = None):
     """Implementation of the ModelHandler interface for vLLM using
     previous messages as input.
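
Note on the second hunk: a parameter annotated `Dict[str, str]` cannot
legally default to `None`, which is what the linter flags; widening the
annotation to `Optional[Dict[str, str]]` is the idiomatic fix. A minimal
standalone sketch of the pattern (the function below is hypothetical, not
Beam's actual API):

  from typing import Dict, Optional

  def start_server(vllm_server_kwargs: Optional[Dict[str, str]] = None) -> None:
    # Substitute an empty dict at call time rather than using a mutable
    # default argument (`= {}`), which would be shared across calls.
    kwargs = vllm_server_kwargs or {}
    for flag, value in kwargs.items():
      print(f'--{flag}={value}')

  start_server({'port': '8000'})
  start_server()  # omitting the argument now type-checks cleanly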