all: test 3.13 ci (#27197)
Co-authored-by: Bagatur <[email protected]>
3 people authored Oct 25, 2024
1 parent 06df15c commit 600b7bd
Showing 237 changed files with 3,668 additions and 4,656 deletions.
9 changes: 6 additions & 3 deletions .github/scripts/check_diff.py
@@ -110,7 +110,7 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
        return _get_pydantic_test_configs(dir_)

    if dir_ == "libs/core":
-       py_versions = ["3.9", "3.10", "3.11", "3.12"]
+       py_versions = ["3.9", "3.10", "3.11", "3.12", "3.13"]
    # custom logic for specific directories
    elif dir_ == "libs/partners/milvus":
        # milvus poetry doesn't allow 3.12 because they
@@ -125,8 +125,11 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
    elif dir_ == "libs/community" and job == "compile-integration-tests":
        # community integration deps are slow in 3.12
        py_versions = ["3.9", "3.11"]
-   else:
+   elif dir_ == ".":
+       # unable to install with 3.13 because tokenizers doesn't support 3.13 yet
        py_versions = ["3.9", "3.12"]
+   else:
+       py_versions = ["3.9", "3.13"]

    return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]

@@ -305,7 +308,7 @@ def _get_configs_for_multi_dirs(
        ]
    }
    map_job_to_configs["test-doc-imports"] = (
-       [{"python-version": "3.12"}] if docs_edited else []
+       [{"python-version": "3.13"}] if docs_edited else []
    )

    for key, value in map_job_to_configs.items():
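Each dict this helper returns becomes one entry in the GitHub Actions test matrix. A minimal, self-contained sketch of the output shape (the directory routing is simplified for illustration; the real function covers more cases):

```python
# Sketch of the matrix-config shape produced by _get_configs_for_single_dir;
# the directory routing here is simplified for illustration.
from typing import Dict, List


def get_configs(job: str, dir_: str) -> List[Dict[str, str]]:
    if dir_ == "libs/core":
        py_versions = ["3.9", "3.10", "3.11", "3.12", "3.13"]
    elif dir_ == ".":
        # tokenizers has no 3.13 wheels yet, so the root env caps at 3.12
        py_versions = ["3.9", "3.12"]
    else:
        # default: oldest and newest supported interpreters
        py_versions = ["3.9", "3.13"]
    return [{"working-directory": dir_, "python-version": v} for v in py_versions]


print(get_configs("test", "libs/core")[0])
# {'working-directory': 'libs/core', 'python-version': '3.9'}
```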
@@ -13,7 +13,7 @@
from langchain_community.utilities.requests import Requests


-class NLATool(Tool):
+class NLATool(Tool): # type: ignore[override]
"""Natural Language API Tool."""

@classmethod
@@ -64,7 +64,7 @@ def _get_default_llm_chain_factory(
return partial(_get_default_llm_chain, prompt)


-class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
+class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests GET tool with LLM-instructed extraction of truncated responses."""

name: str = "requests_get"
@@ -98,7 +98,7 @@ async def _arun(self, text: str) -> str:
raise NotImplementedError()


-class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
+class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests POST tool with LLM-instructed extraction of truncated responses."""

name: str = "requests_post"
@@ -129,7 +129,7 @@ async def _arun(self, text: str) -> str:
raise NotImplementedError()


-class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):
+class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests PATCH tool with LLM-instructed extraction of truncated responses."""

name: str = "requests_patch"
@@ -162,7 +162,7 @@ async def _arun(self, text: str) -> str:
raise NotImplementedError()


-class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool):
+class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests PUT tool with LLM-instructed extraction of truncated responses."""

name: str = "requests_put"
@@ -193,7 +193,7 @@ async def _arun(self, text: str) -> str:
raise NotImplementedError()


-class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):
+class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Tool that sends a DELETE request and parses the response."""

name: str = "requests_delete"
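The repeated `# type: ignore[override]` markers on these class definitions suppress a mypy error code that fires when a subclass redefines something inherited from its base with an incompatible type. A minimal reproduction of the error code itself, shown with a method override (the classes above trip the analogous check on pydantic field definitions):

```python
# Minimal reproduction of mypy's [override] error code; the classes in
# this diff hit the analogous check on pydantic field definitions.
class Base:
    def run(self, x: str) -> str:
        return x


class Child(Base):
    # mypy: Signature of "run" incompatible with supertype "Base"  [override]
    def run(self, x: int) -> str:  # type: ignore[override]
        return str(x)
```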
@@ -255,7 +255,7 @@ def create_assistant(
name=name,
instructions=instructions,
tools=[_get_assistants_tool(tool) for tool in tools], # type: ignore
-tool_resources=tool_resources,
+tool_resources=tool_resources, # type: ignore[arg-type]
model=model,
)
return cls(assistant_id=assistant.id, client=client, **kwargs)
@@ -394,7 +394,7 @@ async def acreate_assistant(
name=name,
instructions=instructions,
tools=openai_tools, # type: ignore
-tool_resources=tool_resources,
+tool_resources=tool_resources, # type: ignore[arg-type]
model=model,
)
return cls(assistant_id=assistant.id, async_client=async_client, **kwargs)
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/anyscale.py
@@ -155,7 +155,7 @@ def validate_environment(cls, values: dict) -> Any:
else:
values["openai_api_base"] = values["anyscale_api_base"]
values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()
values["client"] = openai.ChatCompletion
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
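These `[attr-defined]` ignores exist because `openai.ChatCompletion` is only present in openai<1.0; the 1.x SDK removed the module-level class, so current type stubs no longer declare it even though the legacy runtime path still works. A hedged sketch of checking for it explicitly:

```python
# Hedged sketch: openai<1.0 exposes openai.ChatCompletion, while
# openai>=1.0 removed it in favor of client.chat.completions --
# which is exactly why mypy reports [attr-defined] here.
import openai

legacy = getattr(openai, "ChatCompletion", None)
if legacy is not None:
    client = legacy  # pre-1.0 SDK
else:
    # 1.x-style client; assumes OPENAI_API_KEY is set in the environment
    client = openai.OpenAI().chat.completions
```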
@@ -224,7 +224,7 @@ def validate_environment(cls, values: Dict) -> Dict:
**client_params
).chat.completions
else:
values["client"] = openai.ChatCompletion
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
return values

@property
37 changes: 20 additions & 17 deletions libs/community/langchain_community/chat_models/azureml_endpoint.py
@@ -304,36 +304,38 @@ def _stream(
"http_client": None,
}

-client = openai.OpenAI(**client_params)
+client = openai.OpenAI(**client_params) # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
message_dicts = [
CustomOpenAIChatContentFormatter._convert_message_to_dict(m)
for m in messages
]
params = {"stream": True, "stop": stop, "model": None, **kwargs}

default_chunk_class = AIMessageChunk
-for chunk in client.chat.completions.create(messages=message_dicts, **params):
+for chunk in client.chat.completions.create(messages=message_dicts, **params): # type: ignore[arg-type]
if not isinstance(chunk, dict):
-chunk = chunk.dict()
-if len(chunk["choices"]) == 0:
+chunk = chunk.dict() # type: ignore[attr-defined]
+if len(chunk["choices"]) == 0: # type: ignore[call-overload]
continue
-choice = chunk["choices"][0]
-chunk = _convert_delta_to_message_chunk(
-choice["delta"], default_chunk_class
+choice = chunk["choices"][0] # type: ignore[call-overload]
+chunk = _convert_delta_to_message_chunk( # type: ignore[assignment]
+choice["delta"], # type: ignore[arg-type, index]
+default_chunk_class, # type: ignore[arg-type, index]
)
generation_info = {}
-if finish_reason := choice.get("finish_reason"):
+if finish_reason := choice.get("finish_reason"): # type: ignore[union-attr]
generation_info["finish_reason"] = finish_reason
-logprobs = choice.get("logprobs")
+logprobs = choice.get("logprobs") # type: ignore[union-attr]
if logprobs:
generation_info["logprobs"] = logprobs
-default_chunk_class = chunk.__class__
-chunk = ChatGenerationChunk(
-message=chunk, generation_info=generation_info or None
+default_chunk_class = chunk.__class__ # type: ignore[assignment]
+chunk = ChatGenerationChunk( # type: ignore[assignment]
+message=chunk, # type: ignore[arg-type]
+generation_info=generation_info or None, # type: ignore[arg-type]
)
if run_manager:
-run_manager.on_llm_new_token(chunk.text, chunk=chunk, logprobs=logprobs)
-yield chunk
+run_manager.on_llm_new_token(chunk.text, chunk=chunk, logprobs=logprobs) # type: ignore[attr-defined, arg-type]
+yield chunk # type: ignore[misc]

async def _astream(
self,
@@ -357,16 +359,17 @@ async def _astream(
"http_client": None,
}

-async_client = openai.AsyncOpenAI(**client_params)
+async_client = openai.AsyncOpenAI(**client_params) # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
message_dicts = [
CustomOpenAIChatContentFormatter._convert_message_to_dict(m)
for m in messages
]
params = {"stream": True, "stop": stop, "model": None, **kwargs}

default_chunk_class = AIMessageChunk
-async for chunk in await async_client.chat.completions.create(
-messages=message_dicts, **params
+async for chunk in await async_client.chat.completions.create( # type: ignore[attr-defined]
+messages=message_dicts, # type: ignore[arg-type]
+**params, # type: ignore[arg-type]
):
if not isinstance(chunk, dict):
chunk = chunk.dict()
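Most of the ignores added in this file trace back to a single loop variable (`chunk`) being rebound across unrelated types: an SDK chunk object, then a plain dict, then a message chunk, then a `ChatGenerationChunk`. A minimal reproduction of the resulting [assignment] complaint (types are illustrative):

```python
# Minimal reproduction of the mypy [assignment] errors above: rebinding
# one name to an unrelated type conflicts with the type mypy inferred
# from the first assignment. Types here are illustrative.
from typing import Any, Dict

chunk: Dict[str, Any] = {"choices": [{"delta": {}}]}
chunk = "now a plain string"  # type: ignore[assignment]
```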
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/everlyai.py
@@ -98,7 +98,7 @@ def validate_environment_override(cls, values: dict) -> Any:
"Please install it with `pip install openai`.",
) from e
try:
values["client"] = openai.ChatCompletion
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
22 changes: 11 additions & 11 deletions libs/community/langchain_community/chat_models/jinachat.py
@@ -70,11 +70,11 @@ def _create_retry_decorator(llm: JinaChat) -> Callable[[Any], Any]:
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
-retry_if_exception_type(openai.error.Timeout)
-| retry_if_exception_type(openai.error.APIError)
-| retry_if_exception_type(openai.error.APIConnectionError)
-| retry_if_exception_type(openai.error.RateLimitError)
-| retry_if_exception_type(openai.error.ServiceUnavailableError)
+retry_if_exception_type(openai.error.Timeout) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.APIError) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.APIConnectionError) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.RateLimitError) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.ServiceUnavailableError) # type: ignore[attr-defined]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
@@ -234,7 +234,7 @@ def validate_environment(cls, values: Dict) -> Dict:
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
@@ -266,11 +266,11 @@ def _create_retry_decorator(self) -> Callable[[Any], Any]:
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
-retry_if_exception_type(openai.error.Timeout)
-| retry_if_exception_type(openai.error.APIError)
-| retry_if_exception_type(openai.error.APIConnectionError)
-| retry_if_exception_type(openai.error.RateLimitError)
-| retry_if_exception_type(openai.error.ServiceUnavailableError)
+retry_if_exception_type(openai.error.Timeout) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.APIError) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.APIConnectionError) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.RateLimitError) # type: ignore[attr-defined]
+| retry_if_exception_type(openai.error.ServiceUnavailableError) # type: ignore[attr-defined]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
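The ORed `retry_if_exception_type(...)` chain above is plain tenacity composition. A self-contained sketch of the same decorator pattern, written against generic built-in exceptions rather than the legacy `openai.error` classes:

```python
# Self-contained sketch of the tenacity retry pattern used in this file,
# targeting generic exceptions instead of the legacy openai.error classes.
import logging

from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)


@retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=1, max=60),
    retry=(
        retry_if_exception_type(TimeoutError)
        | retry_if_exception_type(ConnectionError)
    ),
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
def call_api() -> str:
    # hypothetical flaky call; swap in a real client invocation
    return "ok"
```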
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/konko.py
@@ -42,7 +42,7 @@
logger = logging.getLogger(__name__)


-class ChatKonko(ChatOpenAI):
+class ChatKonko(ChatOpenAI): # type: ignore[override]
"""`ChatKonko` Chat large language models API.
To use, you should have the ``konko`` python package installed, and the
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/moonshot.py
@@ -12,7 +12,7 @@
from langchain_community.llms.moonshot import MOONSHOT_SERVICE_URL_BASE, MoonshotCommon


-class MoonshotChat(MoonshotCommon, ChatOpenAI):  # type: ignore[misc]
+class MoonshotChat(MoonshotCommon, ChatOpenAI):  # type: ignore[misc, override, override]
"""Moonshot large language models.
To use, you should have the ``openai`` python package installed, and the
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/octoai.py
@@ -98,7 +98,7 @@ def validate_environment(cls, values: Dict) -> Dict:
else:
values["openai_api_base"] = values["octoai_api_base"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.ChatCompletion
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
14 changes: 7 additions & 7 deletions libs/community/langchain_community/chat_models/openai.py
@@ -87,11 +87,11 @@ def _create_retry_decorator(
import openai

errors = [
-openai.error.Timeout,
-openai.error.APIError,
-openai.error.APIConnectionError,
-openai.error.RateLimitError,
-openai.error.ServiceUnavailableError,
+openai.error.Timeout, # type: ignore[attr-defined]
+openai.error.APIError, # type: ignore[attr-defined]
+openai.error.APIConnectionError, # type: ignore[attr-defined]
+openai.error.RateLimitError, # type: ignore[attr-defined]
+openai.error.ServiceUnavailableError, # type: ignore[attr-defined]
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
@@ -357,7 +357,7 @@ def validate_environment(cls, values: Dict) -> Dict:
**client_params
).chat.completions
elif not values.get("client"):
values["client"] = openai.ChatCompletion
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
else:
pass
return values
@@ -594,7 +594,7 @@ def _client_params(self) -> Dict[str, Any]:
if self.openai_proxy:
import openai

-openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}
+openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[attr-defined]
return {**self._default_params, **openai_creds}

def _get_invocation_params(
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/solar.py
@@ -13,7 +13,7 @@
@deprecated( # type: ignore[arg-type]
since="0.0.34", removal="1.0", alternative_import="langchain_upstage.ChatUpstage"
)
-class SolarChat(SolarCommon, ChatOpenAI):
+class SolarChat(SolarCommon, ChatOpenAI): # type: ignore[override, override]
"""Wrapper around Solar large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``SOLAR_API_KEY`` set with your API key.
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/vertexai.py
@@ -209,7 +209,7 @@ def _get_question(messages: List[BaseMessage]) -> HumanMessage:
removal="1.0",
alternative_import="langchain_google_vertexai.ChatVertexAI",
)
-class ChatVertexAI(_VertexAICommon, BaseChatModel):
+class ChatVertexAI(_VertexAICommon, BaseChatModel): # type: ignore[override]
"""`Vertex AI` Chat large language models API."""

model_name: str = "chat-bison"
@@ -224,7 +224,7 @@ def _yield_paths(self) -> Iterable["AnyPath"]:
yield self.path
return

-paths = self.path.glob(self.glob)
+paths = self.path.glob(self.glob) # type: ignore[attr-defined]
for path in paths:
if self.exclude:
if any(path.match(glob) for glob in self.exclude):
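Here `self.path` can be a broader `AnyPath` union rather than a concrete `pathlib.Path`, which is the likely reason mypy cannot see `.glob` on it. A small pathlib-only sketch of the same glob-then-exclude traversal (root directory and patterns are illustrative):

```python
# Pathlib-only sketch of the glob/exclude traversal above;
# the root directory and patterns are illustrative.
from pathlib import Path

base = Path("docs")  # hypothetical root
exclude = ["*.tmp"]

for path in base.glob("**/*.md"):
    # skip anything matching an exclude pattern, mirroring the loader
    if any(path.match(pattern) for pattern in exclude):
        continue
    print(path)
```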
@@ -219,4 +219,4 @@ def __init__(
def _get_elements(self) -> List:
from unstructured.partition.csv import partition_csv

-return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
+return partition_csv(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
6 changes: 3 additions & 3 deletions libs/community/langchain_community/document_loaders/email.py
@@ -60,16 +60,16 @@ def __init__(
def _get_elements(self) -> List:
from unstructured.file_utils.filetype import FileType, detect_filetype

-filetype = detect_filetype(self.file_path)
+filetype = detect_filetype(self.file_path) # type: ignore[arg-type]

if filetype == FileType.EML:
from unstructured.partition.email import partition_email

-return partition_email(filename=self.file_path, **self.unstructured_kwargs)
+return partition_email(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG:
from unstructured.partition.msg import partition_msg

-return partition_msg(filename=self.file_path, **self.unstructured_kwargs)
+return partition_msg(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
else:
raise ValueError(
f"Filetype {filetype} is not supported in UnstructuredEmailLoader."
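The `[arg-type]` ignores across these loaders likely reflect `self.file_path` being a `Path`-like union while the `unstructured` partitioners are typed to take `str`. A condensed sketch of the dispatch itself (input path is illustrative; requires the `unstructured` package):

```python
# Condensed sketch of UnstructuredEmailLoader's filetype dispatch;
# the input path is illustrative. Requires the `unstructured` package.
from unstructured.file_utils.filetype import FileType, detect_filetype

file_path = "message.eml"  # hypothetical input
filetype = detect_filetype(file_path)

if filetype == FileType.EML:
    from unstructured.partition.email import partition_email

    elements = partition_email(filename=file_path)
elif filetype == FileType.MSG:
    from unstructured.partition.msg import partition_msg

    elements = partition_msg(filename=file_path)
else:
    raise ValueError(f"Filetype {filetype} is not supported in UnstructuredEmailLoader.")
```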
@@ -39,4 +39,4 @@ def _get_elements(self) -> List:
)
from unstructured.partition.epub import partition_epub

-return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
+return partition_epub(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
@@ -48,4 +48,4 @@ def __init__(
def _get_elements(self) -> List:
from unstructured.partition.xlsx import partition_xlsx

-return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
+return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
2 changes: 1 addition & 1 deletion libs/community/langchain_community/document_loaders/git.py
@@ -74,7 +74,7 @@ def lazy_load(self) -> Iterator[Document]:

file_path = os.path.join(self.repo_path, item.path)

-ignored_files = repo.ignored([file_path])
+ignored_files = repo.ignored([file_path]) # type: ignore[arg-type]
if len(ignored_files):
continue

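GitPython's `Repo.ignored()` wraps `git check-ignore` and returns the subset of the given paths that match an ignore rule; it is typed as `ignored(*paths)`, and passing a list instead (as the loader does, and GitPython accepts at runtime) is the likely source of the new `[arg-type]` ignore. A hedged usage sketch against the typed signature (repo path and files are hypothetical):

```python
# Hedged sketch of the GitPython ignore check used above; the repo path
# and candidate files are hypothetical.
from git import Repo

repo = Repo("/path/to/checkout")  # hypothetical local clone
candidates = ["build/artifact.bin", "src/main.py"]
ignored = set(repo.ignored(*candidates))  # wraps `git check-ignore`
files_to_load = [p for p in candidates if p not in ignored]
```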
@@ -30,4 +30,4 @@ def _get_elements(self) -> List:
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html

-return partition_html(filename=self.file_path, **self.unstructured_kwargs)
+return partition_html(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
@@ -30,4 +30,4 @@ def _get_elements(self) -> List:
def _get_elements(self) -> List:
from unstructured.partition.image import partition_image

-return partition_image(filename=self.file_path, **self.unstructured_kwargs)
+return partition_image(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]