diff --git a/guardrails_api/__init__.py b/guardrails_api/__init__.py
index 27fdca4..5c71f76 100644
--- a/guardrails_api/__init__.py
+++ b/guardrails_api/__init__.py
@@ -1 +1 @@
-__version__ = "0.0.3"
+__version__ = "0.1.0-alpha1"
diff --git a/guardrails_api/utils/get_llm_callable.py b/guardrails_api/utils/get_llm_callable.py
index 49d7737..931e820 100644
--- a/guardrails_api/utils/get_llm_callable.py
+++ b/guardrails_api/utils/get_llm_callable.py
@@ -1,11 +1,5 @@
 import litellm
 from typing import Any, Awaitable, Callable, Union
-from guardrails.utils.openai_utils import (
-    get_static_openai_create_func,
-    get_static_openai_chat_create_func,
-    get_static_openai_acreate_func,
-    get_static_openai_chat_acreate_func,
-)
 from guardrails_api_client.models.llm_resource import LLMResource
 
 
@@ -13,15 +7,9 @@ def get_llm_callable(
     llm_api: str,
 ) -> Union[Callable, Callable[[Any], Awaitable[Any]]]:
     # TODO: Add error handling and throw 400
-    if llm_api == LLMResource.OPENAI_DOT_COMPLETION_DOT_CREATE.value:
-        return get_static_openai_create_func()
-    elif llm_api == LLMResource.OPENAI_DOT_CHAT_COMPLETION_DOT_CREATE.value:
-        return get_static_openai_chat_create_func()
-    elif llm_api == LLMResource.OPENAI_DOT_COMPLETION_DOT_ACREATE.value:
-        return get_static_openai_acreate_func()
-    elif llm_api == LLMResource.OPENAI_DOT_CHAT_COMPLETION_DOT_ACREATE.value:
-        return get_static_openai_chat_acreate_func()
-    elif llm_api == LLMResource.LITELLM_DOT_COMPLETION.value:
+    # do we need this anymore if we're going to use the default handling
+    # and only set the model?
+    if llm_api == LLMResource.LITELLM_DOT_COMPLETION.value:
         return litellm.completion
     elif llm_api == LLMResource.LITELLM_DOT_ACOMPLETION.value:
         return litellm.acompletion
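
Note: a minimal usage sketch of the slimmed-down helper, assuming litellm's OpenAI-compatible response shape; the model name and prompt below are illustrative and not part of this change.

    from guardrails_api.utils.get_llm_callable import get_llm_callable
    from guardrails_api_client.models.llm_resource import LLMResource

    # Resolve the litellm completion callable via the remaining branch.
    llm = get_llm_callable(LLMResource.LITELLM_DOT_COMPLETION.value)

    # litellm routes on the model string, so only `model` needs to be set
    # (any litellm-supported model would do here).
    response = llm(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)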