Merge branch 'master' into update-azuresearch
lz-chen authored Jan 8, 2024
2 parents 0c137cd + 4c47f39 commit 5dcdcae
Showing 19 changed files with 582 additions and 139 deletions.
2 changes: 1 addition & 1 deletion docs/docs/expression_language/why.ipynb
@@ -1007,7 +1007,7 @@
"from langchain_openai import OpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough, ConfigurableField\n",
"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = \"...\"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
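For context, `ConfigurableField` — the new import in this notebook cell — lets a chain expose model parameters for per-invocation overrides. A minimal sketch of typical usage, assuming an OpenAI key is configured (the field id and prompt are illustrative, not from this diff):

```python
from langchain_core.runnables import ConfigurableField
from langchain_openai import OpenAI

# Expose the model's temperature as a runtime-configurable field.
llm = OpenAI(temperature=0).configurable_fields(
    temperature=ConfigurableField(
        id="llm_temperature",
        name="LLM Temperature",
        description="Sampling temperature used by the model",
    )
)

# Override the field for a single call without rebuilding the chain.
llm.with_config(configurable={"llm_temperature": 0.9}).invoke("Pick a random number")
```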
17 changes: 6 additions & 11 deletions libs/community/langchain_community/cache.py
@@ -56,7 +56,7 @@
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import LLM, get_prompts
from langchain_core.load.dump import dumps
-from langchain_core.load.load import _loads_suppress_warning
+from langchain_core.load.load import loads
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.utils import get_from_env

@@ -149,10 +149,7 @@ def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]:
        RETURN_VAL_TYPE: A list of generations.
    """
    try:
-        generations = [
-            _loads_suppress_warning(_item_str)
-            for _item_str in json.loads(generations_str)
-        ]
+        generations = [loads(_item_str) for _item_str in json.loads(generations_str)]
        return generations
    except (json.JSONDecodeError, TypeError):
        # deferring the (soft) handling to after the legacy-format attempt
@@ -227,7 +224,7 @@ def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
            rows = session.execute(stmt).fetchall()
            if rows:
                try:
-                    return [_loads_suppress_warning(row[0]) for row in rows]
+                    return [loads(row[0]) for row in rows]
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
@@ -398,7 +395,7 @@ def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        if results:
            for _, text in results.items():
                try:
-                    generations.append(_loads_suppress_warning(text))
+                    generations.append(loads(text))
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
@@ -538,9 +535,7 @@ def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        if results:
            for document in results:
                try:
-                    generations.extend(
-                        _loads_suppress_warning(document.metadata["return_val"])
-                    )
+                    generations.extend(loads(document.metadata["return_val"]))
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
@@ -1190,7 +1185,7 @@ def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        rows = self._search_rows(prompt, llm_string)
        if rows:
-            return [_loads_suppress_warning(row[0]) for row in rows]
+            return [loads(row[0]) for row in rows]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
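All five cache backends above swap the private `_loads_suppress_warning` helper for the public `loads`, presumably safe now that warnings are gated on the caller being external (see the `is_caller_internal` check added in `beta_decorator.py` below). A minimal sketch of the dump/load round-trip these caches rely on (values are illustrative):

```python
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.outputs import Generation

# A cache stores each Generation as a JSON string and rebuilds it on lookup.
gen = Generation(text="hello")
serialized = dumps(gen)       # JSON string in the lc/type/id/kwargs format
restored = loads(serialized)  # -> Generation(text="hello")
assert restored == gen
```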
8 changes: 3 additions & 5 deletions libs/community/langchain_community/chat_loaders/langsmith.py
@@ -4,7 +4,7 @@
from typing import TYPE_CHECKING, Dict, Iterable, Iterator, List, Optional, Union, cast

from langchain_core.chat_sessions import ChatSession
-from langchain_core.load.load import _load_suppress_warning
+from langchain_core.load.load import load

from langchain_community.chat_loaders.base import BaseChatLoader

@@ -66,10 +66,8 @@ def _get_messages_from_llm_run(llm_run: "Run") -> ChatSession:
            raise ValueError(f"Run has no 'messages' inputs. Got {llm_run.inputs}")
        if not llm_run.outputs:
            raise ValueError("Cannot convert pending run")
-        messages = _load_suppress_warning(llm_run.inputs)["messages"]
-        message_chunk = _load_suppress_warning(llm_run.outputs)["generations"][0][
-            "message"
-        ]
+        messages = load(llm_run.inputs)["messages"]
+        message_chunk = load(llm_run.outputs)["generations"][0]["message"]
        return ChatSession(messages=messages + [message_chunk])

    @staticmethod
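Here `load` (the dict counterpart of `loads`) rebuilds message objects from the serialized run payload. A sketch with a hypothetical payload in the constructor format `load` expects (the exact shape of real LangSmith run inputs may differ):

```python
from langchain_core.load.load import load

inputs = {
    "messages": [
        {
            "lc": 1,
            "type": "constructor",
            "id": ["langchain", "schema", "messages", "HumanMessage"],
            "kwargs": {"content": "hi"},
        }
    ]
}
messages = load(inputs)["messages"]  # -> [HumanMessage(content="hi")]
```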
8 changes: 1 addition & 7 deletions libs/community/langchain_community/chat_models/anyscale.py
@@ -4,10 +4,9 @@
import logging
import os
import sys
-from typing import TYPE_CHECKING, Any, Dict, Optional, Set
+from typing import TYPE_CHECKING, Dict, Optional, Set

import requests
-from langchain_core._api.deprecation import suppress_langchain_deprecation_warning
from langchain_core.messages import BaseMessage
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
@@ -73,11 +72,6 @@ def is_lc_serializable(cls) -> bool:
    available_models: Optional[Set[str]] = None
    """Available models from Anyscale API."""

-    def __init__(self, *kwargs: Any) -> None:
-        # bypass deprecation warning for ChatOpenAI
-        with suppress_langchain_deprecation_warning():
-            super().__init__(*kwargs)
-
    @staticmethod
    def get_available_models(
        anyscale_api_key: Optional[str] = None,
8 changes: 1 addition & 7 deletions libs/community/langchain_community/chat_models/everlyai.py
@@ -3,9 +3,8 @@

import logging
import sys
-from typing import TYPE_CHECKING, Any, Dict, Optional, Set
+from typing import TYPE_CHECKING, Dict, Optional, Set

-from langchain_core._api.deprecation import suppress_langchain_deprecation_warning
from langchain_core.messages import BaseMessage
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.utils import get_from_dict_or_env
@@ -65,11 +64,6 @@ def is_lc_serializable(cls) -> bool:
    available_models: Optional[Set[str]] = None
    """Available models from EverlyAI API."""

-    def __init__(self, *kwargs: Any) -> None:
-        # bypass deprecation warning for ChatOpenAI
-        with suppress_langchain_deprecation_warning():
-            super().__init__(*kwargs)
-
    @staticmethod
    def get_available_models() -> Set[str]:
        """Get available models from EverlyAI API."""
6 changes: 0 additions & 6 deletions libs/community/langchain_community/chat_models/promptlayer_openai.py
@@ -2,7 +2,6 @@
import datetime
from typing import Any, Dict, List, Optional

-from langchain_core._api.deprecation import suppress_langchain_deprecation_warning
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
@@ -40,11 +39,6 @@ class PromptLayerChatOpenAI(ChatOpenAI):
    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

-    def __init__(self, *kwargs: Any) -> None:
-        # bypass deprecation warning for ChatOpenAI
-        with suppress_langchain_deprecation_warning():
-            super().__init__(*kwargs)
-
    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False
6 changes: 0 additions & 6 deletions libs/community/langchain_community/llms/promptlayer_openai.py
@@ -1,7 +1,6 @@
import datetime
from typing import Any, List, Optional

-from langchain_core._api.deprecation import suppress_langchain_deprecation_warning
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
@@ -38,11 +37,6 @@ class PromptLayerOpenAI(OpenAI):
    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

-    def __init__(self, *kwargs: Any) -> None:
-        # bypass deprecation warning for ChatOpenAI
-        with suppress_langchain_deprecation_warning():
-            super().__init__(*kwargs)
-
    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False
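The same five-line `__init__` override is deleted from `ChatAnyscale`, `ChatEverlyAI`, `PromptLayerChatOpenAI`, and `PromptLayerOpenAI` above. It existed only to keep these subclasses from tripping the `ChatOpenAI`/`OpenAI` deprecation warning at construction time, which the internal-caller check introduced below appears to make unnecessary. If user code ever needs the same effect, the context manager the removed code used is still available; a sketch:

```python
from langchain_core._api.deprecation import suppress_langchain_deprecation_warning

with suppress_langchain_deprecation_warning():
    # Anything constructed in this block skips LangChainDeprecationWarning.
    legacy_component = SomeDeprecatedClass()  # hypothetical deprecated class
```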
4 changes: 2 additions & 2 deletions libs/community/poetry.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions libs/community/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-community"
-version = "0.0.9"
+version = "0.0.10"
description = "Community contributed LangChain integrations."
authors = []
license = "MIT"
@@ -9,7 +9,7 @@ repository = "https://github.com/langchain-ai/langchain"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.1.7,<0.2"
langchain-core = ">=0.1.8,<0.2"
SQLAlchemy = ">=1.4,<3"
requests = "^2"
PyYAML = ">=5.3"
67 changes: 43 additions & 24 deletions libs/core/langchain_core/_api/beta_decorator.py
@@ -15,6 +15,8 @@
import warnings
from typing import Any, Callable, Generator, Type, TypeVar

+from langchain_core._api.internal import is_caller_internal
+

class LangChainBetaWarning(DeprecationWarning):
"""A class for issuing beta warnings for LangChain users."""
@@ -78,21 +80,60 @@ def beta(
        _addendum: str = addendum,
    ) -> T:
        """Implementation of the decorator returned by `beta`."""

+        def emit_warning() -> None:
+            """Emit the warning."""
+            warn_beta(
+                message=_message,
+                name=_name,
+                obj_type=_obj_type,
+                addendum=_addendum,
+            )
+
+        warned = False
+
+        def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any:
+            """Wrapper for the original wrapped callable that emits a warning.
+            Args:
+                *args: The positional arguments to the function.
+                **kwargs: The keyword arguments to the function.
+            Returns:
+                The return value of the function being wrapped.
+            """
+            nonlocal warned
+            if not warned and not is_caller_internal():
+                warned = True
+                emit_warning()
+            return wrapped(*args, **kwargs)

        if isinstance(obj, type):
            if not _obj_type:
                _obj_type = "class"
            wrapped = obj.__init__  # type: ignore
            _name = _name or obj.__name__
            old_doc = obj.__doc__

-            def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
+            def finalize(_: Any, new_doc: str) -> T:
                """Finalize the annotation of a class."""
                try:
                    obj.__doc__ = new_doc
                except AttributeError:  # Can't set on some extension objects.
                    pass
+
+                def warn_if_direct_instance(
+                    self: Any, *args: Any, **kwargs: Any
+                ) -> Any:
+                    """Warn that the class is in beta."""
+                    nonlocal warned
+                    if not warned and type(self) is obj and not is_caller_internal():
+                        warned = True
+                        emit_warning()
+                    return wrapped(self, *args, **kwargs)
+
                obj.__init__ = functools.wraps(obj.__init__)(  # type: ignore[misc]
-                    wrapper
+                    warn_if_direct_instance
                )
                return obj

@@ -155,28 +196,6 @@ def finalize(  # type: ignore
            wrapper.__doc__ = new_doc
            return wrapper

-        def emit_warning() -> None:
-            """Emit the warning."""
-            warn_beta(
-                message=_message,
-                name=_name,
-                obj_type=_obj_type,
-                addendum=_addendum,
-            )
-
-        def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any:
-            """Wrapper for the original wrapped callable that emits a warning.
-            Args:
-                *args: The positional arguments to the function.
-                **kwargs: The keyword arguments to the function.
-            Returns:
-                The return value of the function being wrapped.
-            """
-            emit_warning()
-            return wrapped(*args, **kwargs)
-
        old_doc = inspect.cleandoc(old_doc or "").strip("\n")

        if not old_doc:
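The reworked `beta` decorator changes warning behavior in three ways: `emit_warning` now fires at most once per decorated object (the `warned` flag), it is skipped entirely for internal callers (`is_caller_internal`), and for classes the new `warn_if_direct_instance` wrapper warns only on direct instantiation, not on subclasses. A rough behavioral sketch (class names are illustrative):

```python
from langchain_core._api.beta_decorator import beta

@beta()
class Parrot:
    """A feature still in beta."""

class FancyParrot(Parrot):
    pass

Parrot()       # emits LangChainBetaWarning on first direct instantiation
Parrot()       # silent: the shared `warned` flag is already set
FancyParrot()  # subclass instantiation never triggers the direct-instance warning
```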
