From e8023b4452f065fc93c260954b3ffc169f1596c8 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 16 May 2024 14:13:34 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../jupyter_ai_magics/providers.py | 21 ++++++++++++-------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py b/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
index 6da92e1c8..382e2000a 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
@@ -630,7 +630,9 @@ def validate_environment(cls, values: Dict) -> Dict:
         return values
 
     # Handle text and image outputs
-    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
+    def _call(
+        self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any
+    ) -> str:
         """Call out to Hugging Face Hub's inference endpoint.
 
         Args:
@@ -654,8 +656,8 @@ def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) ->
             stream=False,
             task=self.task,
         )
-
-        try: # check if this is a text-generation task
+
+        try:  # check if this is a text-generation task
             response_text = json.loads(response.decode())[0]["generated_text"]
             # Maybe the generation has stopped at one of the stop sequences:
             # then we remove this stop sequence from the end of the generated text
@@ -663,10 +665,10 @@ def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) ->
                 if response_text[-len(stop_seq) :] == stop_seq:
                     response_text = response_text[: -len(stop_seq)]
             return response_text
-        except: # if fails, then try to process as a text-to-image task
+        except:  # if fails, then try to process as a text-to-image task
             # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_to_image.example
             # Custom code for responding to image generation responses
-            if type(response)==bytes: # Is this an image
+            if type(response) == bytes:  # Is this an image
                 image = self.client.text_to_image(prompt)
                 imageFormat = image.format  # Presume it's a PIL ImageFile
                 mimeType = ""
@@ -681,10 +683,13 @@ def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) ->
                 buffer = io.BytesIO()
                 image.save(buffer, format=imageFormat)
                 # # Encode image data to Base64 bytes, then decode bytes to str
-                return mimeType + ";base64," + base64.b64encode(buffer.getvalue()).decode()
+                return (
+                    mimeType + ";base64," + base64.b64encode(buffer.getvalue()).decode()
+                )
             else:
-                raise ValueError("Task not supported, only text-generation and text-to-image tasks are valid.")
-
+                raise ValueError(
+                    "Task not supported, only text-generation and text-to-image tasks are valid."
+                )
 
     async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
         return await self._call_in_executor(*args, **kwargs)